Skip to main content
Glama
testmoMCP.py34.8 kB
# cspell:ignore TESTMO testmo
import os
import re
import httpx
from fastmcp import FastMCP
from typing import Optional, List, Dict, Any
import json

# Initialize the MCP Server
mcp = FastMCP("Testmo MCP")

# Configuration
# Users should set these environment variables or pass them when running
TESTMO_BASE_URL = os.getenv("TESTMO_BASE_URL")  # e.g., "https://mycompany.testmo.net"
TESTMO_TOKEN = os.getenv("TESTMO_TOKEN")

if not TESTMO_BASE_URL or not TESTMO_TOKEN:
    print("Warning: TESTMO_BASE_URL or TESTMO_TOKEN environment variables are not set.")
    print("Please set them before running or ensure the AI client provides them.")


def get_headers() -> Dict[str, str]:
    """Helper to construct auth headers.

    Raises:
        ValueError: if TESTMO_TOKEN is not configured.
    """
    if not TESTMO_TOKEN:
        raise ValueError("TESTMO_TOKEN environment variable is missing.")
    return {
        "Authorization": f"Bearer {TESTMO_TOKEN}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }


def clean_url(url: str) -> str:
    """Ensure URL ends with /api/v1 for the base."""
    url = url.strip().rstrip("/")
    if not url.endswith("/api/v1"):
        url = f"{url}/api/v1"
    return url


@mcp.tool()
async def list_projects() -> str:
    """
    List all projects in Testmo to get their IDs.
    Use this to find the 'project_id' needed for other tools.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects"
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            # Format output for easier AI reading
            projects = data.get("result", [])
            output = ["Found Projects:"]
            for p in projects:
                output.append(f"ID: {p['id']} | Name: {p['name']} | Active: {not p.get('is_completed')}")
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def get_project_templates(project_id: int) -> str:
    """
    Get templates available for a project. This is essential for creating
    test cases with steps.

    The 'Case (steps)' template allows step-by-step test cases with expected results.
    Returns template IDs needed for create_test_case.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    # Get cases with expands to retrieve templates
    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases?expands=templates&limit=1"
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            templates = data.get("expands", {}).get("templates", [])
            if not templates:
                return "No templates found. Check if the project exists and has templates configured."

            output = [f"Templates for Project {project_id}:"]
            output.append("-" * 50)
            for t in templates:
                default_str = " (DEFAULT)" if t.get('is_default') else ""
                output.append(f"ID: {t['id']} | Name: {t['name']}{default_str}")
            output.append("-" * 50)
            output.append("\nNOTE: For test cases with steps, use the 'Case (steps)' template.")
            output.append("Pass the template_id when creating test cases to use steps.")
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def get_project_folders(project_id: int) -> str:
    """
    List folders (repositories) within a project to verify where to place a test case.
    Returns ID and Name of folders.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    # Note: Testmo often treats 'folders' as the organization structure for cases
    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/folders"
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            folders = data.get("result", [])
            if not folders:
                return "No folders found in this project."
            output = [f"Folders for Project {project_id}:"]
            for f in folders:
                output.append(f"ID: {f['id']} | Name: {f['name']} | Parent ID: {f.get('parent_id')}")
            return "\n".join(output)
        except Exception as e:
            return f"Error fetching folders: {str(e)}"


@mcp.tool()
async def create_folder(
    project_id: int,
    name: str,
    parent_id: Optional[int] = None
) -> str:
    """
    Create a new folder in a Testmo project to organize test cases.

    Args:
        project_id: The ID of the project.
        name: The name of the folder to create.
        parent_id: Optional parent folder ID (for nested folders).

    Returns:
        The created folder's ID and name.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/folders"
    folder_data = {"name": name}
    # Explicit None check so a (hypothetical) falsy-but-valid ID isn't dropped;
    # matches the style used in update_test_case.
    if parent_id is not None:
        folder_data["parent_id"] = parent_id

    # Testmo API expects folders wrapped in a "folders" array
    request_body = {"folders": [folder_data]}

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(url, json=request_body, headers=get_headers())
            response.raise_for_status()
            results = response.json().get("result", [])
            if results and len(results) > 0:
                result = results[0]
                return f"Success! Folder Created.\nID: {result.get('id')}\nName: {result.get('name')}"
            return "Folder created but no result returned."
        except httpx.HTTPStatusError as e:
            return f"Failed to create folder: {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


def parse_steps_to_testmo_format(steps_text: str) -> List[Dict[str, str]]:
    """
    Parse steps text into Testmo's required format.

    Testmo expects steps in this EXACT format:
    [
        {"text1": "<p>step content</p>", "text3": "<p>expected result</p>"},
        ...
    ]
    Where:
    - text1 = step description (wrapped in <p> tags)
    - text3 = expected result (wrapped in <p> tags)

    Accepts various input formats:
    1. Numbered steps with | separator: "1. Do something | Expected: Something happens"
    2. Numbered steps with expected on next line: "1. Do something\\nExpected: Something happens"
    3. Simple numbered list: "1. Do something\\n2. Do another thing"

    NOTE(review): step text is embedded in <p>...</p> without HTML escaping,
    so '<' or '&' in a step passes through verbatim — confirm Testmo accepts this.
    """
    steps_list = []
    lines = steps_text.strip().split('\n')

    current_step = None      # becomes truthy once the first numbered step is seen
    step_content = ""
    step_expected = ""

    for line in lines:
        line = line.strip()
        if not line:
            continue

        # Check if this is a new step (starts with a number like "1." or "1)")
        step_match = re.match(r'^[\d]+[.\)]\s*(.+)', line)
        if step_match:
            # Save previous step if exists
            if current_step is not None:
                # Testmo format: text1 for step, text3 for expected, wrapped in <p> tags
                step_obj = {
                    "text1": f"<p>{step_content}</p>",
                    "text3": f"<p>{step_expected}</p>" if step_expected else "<p></p>"
                }
                steps_list.append(step_obj)

            step_content = step_match.group(1).strip()
            step_expected = ""
            current_step = True

            # Check if there's an expected result in the same line (| separator)
            if ' | Expected:' in step_content:
                parts = step_content.split(' | Expected:', 1)
                step_content = parts[0].strip()
                step_expected = parts[1].strip() if len(parts) > 1 else ""
            elif ' | ' in step_content:
                parts = step_content.split(' | ', 1)
                step_content = parts[0].strip()
                step_expected = parts[1].strip() if len(parts) > 1 else ""
        elif current_step is not None:
            # This might be an expected result on a separate line
            if line.lower().startswith('expected:'):
                step_expected = line[9:].strip()
            elif line.lower().startswith('expected result:'):
                step_expected = line[16:].strip()
            else:
                # Append to content if it's a continuation
                step_content += " " + line

    # Don't forget the last step
    if current_step is not None:
        step_obj = {
            "text1": f"<p>{step_content}</p>",
            "text3": f"<p>{step_expected}</p>" if step_expected else "<p></p>"
        }
        steps_list.append(step_obj)

    return steps_list


@mcp.tool()
async def create_test_case(
    project_id: int,
    title: str,
    description: str,
    steps: Optional[str] = None,
    expected_result: Optional[str] = None,
    folder_id: Optional[int] = None,
    priority_id: Optional[int] = None,
    template_id: Optional[int] = None,
    estimate: Optional[int] = None
) -> str:
    """
    Create a new Test Case in Testmo with proper steps formatting.

    Args:
        project_id: The ID of the project (use list_projects to find this).
        title: The name/summary of the test case.
        description: Preconditions or general description of the test.
        steps: Test steps as numbered list. Format each step as:
               "1. Step description | Expected: expected result"
               Or just "1. Step description" without expected result per step.
        expected_result: Overall expected outcome.
        folder_id: Optional ID of the folder to place the case in.
        priority_id: Optional priority ID (1=Low, 2=Medium, 3=High, 4=Critical).
        template_id: Optional template ID. Use get_project_templates to find
                     the 'Case (steps)' template ID.
        estimate: Optional estimated execution time in minutes
                  (e.g., 60 for 1 hour, 480 for 8 hours).

    Example steps format:
        "1. Navigate to login page | Expected: Login page loads
         2. Enter valid credentials | Expected: Fields accept input
         3. Click Login button | Expected: User is logged in"
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases"

    # Construct Payload with Testmo's expected field names
    payload = {
        "name": title,
    }

    # Add optional fields
    if folder_id:
        payload["folder_id"] = folder_id
    if template_id:
        payload["template_id"] = template_id
    # Estimate - pass as minutes directly (Testmo stores in minutes)
    if estimate:
        payload["estimate"] = estimate
    # Priority - use custom_priority field
    if priority_id:
        payload["custom_priority"] = priority_id
    # Description
    if description:
        payload["custom_description"] = description

    # Parse and add steps in Testmo's required format
    # Format: [{"text1": "<p>step</p>", "text3": "<p>expected</p>"}, ...]
    if steps:
        parsed_steps = parse_steps_to_testmo_format(steps)
        # If we have an overall expected_result but steps don't have individual ones,
        # add it to the last step
        if expected_result and parsed_steps:
            last_step = parsed_steps[-1]
            if last_step.get("text3") == "<p></p>":
                last_step["text3"] = f"<p>{expected_result}</p>"
        payload["custom_steps"] = parsed_steps
    elif expected_result:
        # No steps but have expected result - add to description
        if description:
            payload["custom_description"] = f"{description}\n\nExpected Result: {expected_result}"
        else:
            payload["custom_description"] = f"Expected Result: {expected_result}"

    # Testmo API expects cases to be wrapped in a "cases" array
    request_body = {"cases": [payload]}

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(url, json=request_body, headers=get_headers())
            response.raise_for_status()
            results = response.json().get("result", [])
            if results and len(results) > 0:
                result = results[0]
                case_id = result.get('id')
                return f"Success! Test Case Created.\nID: {case_id}\nName: {result.get('name')}\nLink: {TESTMO_BASE_URL}/cases/view/{case_id}"
            return "Test case created but no result returned."
        except httpx.HTTPStatusError as e:
            error_msg = f"Failed to create test case: {e.response.text}"
            # Add debug info
            error_msg += f"\n\nDebug - Request payload sent:\n{json.dumps(request_body, indent=2)}"
            return error_msg
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def create_test_case_with_steps_json(
    project_id: int,
    title: str,
    steps_json: str,
    description: Optional[str] = None,
    folder_id: Optional[int] = None,
    priority_id: Optional[int] = None,
    template_id: Optional[int] = None,
    estimate: Optional[int] = None
) -> str:
    """
    Create a test case with steps provided as JSON array for more control.
    Use this when you need precise control over step formatting.

    Args:
        project_id: The ID of the project.
        title: The name/summary of the test case.
        steps_json: JSON array of steps in Testmo format.
                    Each step needs 'text1' (step) and 'text3' (expected).
                    Example: '[{"text1": "<p>Click login</p>", "text3": "<p>Page loads</p>"}]'
                    Or simpler: '[{"content": "Click login", "expected": "Page loads"}]'
                    (will be converted)
        description: Optional description/preconditions.
        folder_id: Optional folder ID.
        priority_id: Optional priority (1=Low, 2=Medium, 3=High, 4=Critical).
        template_id: Template ID (use get_project_templates to find 'Case (steps)' template).
        estimate: Estimated time in minutes (e.g., 60 for 1 hour, 480 for 8 hours/1 day).
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    try:
        steps = json.loads(steps_json)
    except json.JSONDecodeError as e:
        return f"Invalid JSON for steps: {str(e)}"

    # Convert simple format to Testmo format if needed
    formatted_steps = []
    for step in steps:
        if "text1" in step:
            # Already in Testmo format
            formatted_steps.append(step)
        else:
            # Convert from simple format; accept several alias keys for each field
            content = step.get("content", step.get("step", step.get("description", "")))
            expected = step.get("expected", step.get("expected_result", step.get("result", "")))
            formatted_steps.append({
                "text1": f"<p>{content}</p>",
                "text3": f"<p>{expected}</p>" if expected else "<p></p>"
            })

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases"

    payload = {
        "name": title,
        "custom_steps": formatted_steps
    }
    if folder_id:
        payload["folder_id"] = folder_id
    if template_id:
        payload["template_id"] = template_id
    if estimate:
        payload["estimate"] = estimate
    if priority_id:
        payload["custom_priority"] = priority_id
    if description:
        payload["custom_description"] = description

    request_body = {"cases": [payload]}

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(url, json=request_body, headers=get_headers())
            response.raise_for_status()
            results = response.json().get("result", [])
            if results and len(results) > 0:
                result = results[0]
                case_id = result.get('id')
                return f"Success! Test Case Created.\nID: {case_id}\nName: {result.get('name')}\nLink: {TESTMO_BASE_URL}/cases/view/{case_id}"
            return "Test case created but no result returned."
        except httpx.HTTPStatusError as e:
            error_msg = f"Failed to create test case: {e.response.text}"
            error_msg += f"\n\nDebug - Request payload sent:\n{json.dumps(request_body, indent=2)}"
            return error_msg
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def get_test_case(project_id: int, case_id: int) -> str:
    """
    Get details of a specific test case including its steps and custom fields.
    Useful for verifying created test cases or understanding the structure.

    Args:
        project_id: The project ID.
        case_id: The test case ID.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    # Testmo API doesn't have a single case endpoint, so we fetch cases and filter
    # Use sort by ID descending and limit to optimize when looking for recent cases
    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases?expands=templates,folders&limit=100&sort=repository_cases:id&order=desc"

    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            cases = data.get("result", [])

            # Find the specific case by ID
            case = None
            for c in cases:
                if isinstance(c, dict) and c.get('id') == case_id:
                    case = c
                    break

            if not case:
                return f"Test case with ID {case_id} not found in the first 100 results."

            # Build output
            output = [f"Test Case Details (ID: {case_id}):"]
            output.append("-" * 50)
            output.append(f"Name: {case.get('name')}")
            output.append(f"Template ID: {case.get('template_id')}")
            output.append(f"Folder ID: {case.get('folder_id')}")
            output.append(f"State ID: {case.get('state_id')}")

            # Format estimate nicely (stored in minutes)
            est = case.get('estimate')
            if est:
                hours = est // 60
                mins = est % 60
                if hours > 0 and mins > 0:
                    est_str = f"{hours}h {mins}m"
                elif hours > 0:
                    est_str = f"{hours}h"
                else:
                    est_str = f"{mins}m"
                output.append(f"Estimate: {est_str} ({est} minutes)")
            else:
                output.append("Estimate: Not set")

            output.append(f"Created: {case.get('created_at')}")
            output.append(f"Updated: {case.get('updated_at')}")

            # Show custom fields if present
            custom_fields = []
            for key, value in case.items():
                if key.startswith('custom_'):
                    custom_fields.append(f"  {key}: {value}")
            if custom_fields:
                output.append("-" * 50)
                output.append("Custom Fields:")
                output.extend(custom_fields)

            output.append("-" * 50)
            # Include raw JSON for debugging
            output.append("\nFull case data (JSON):")
            output.append(json.dumps(case, indent=2))

            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def list_test_cases(
    project_id: int,
    folder_id: Optional[int] = None,
    limit: int = 25
) -> str:
    """
    List test cases in a project, optionally filtered by folder.

    Args:
        project_id: The project ID.
        folder_id: Optional folder ID to filter cases.
        limit: Maximum number of cases to return (default 25).
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases?limit={limit}"
    if folder_id:
        url += f"&folder_id={folder_id}"

    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            cases = data.get("result", [])
            if not cases:
                return "No test cases found."

            # Handle page info safely
            page_info = data.get("page")
            if isinstance(page_info, dict):
                total = page_info.get('total', '?')
            else:
                total = '?'

            output = [f"Test Cases (showing {len(cases)} of {total}):"]
            output.append("-" * 50)
            for c in cases:
                # Handle case where c might be just an ID or a full object
                if isinstance(c, dict):
                    output.append(f"ID: {c.get('id')} | Key: {c.get('key')} | Name: {c.get('name')}")
                else:
                    output.append(f"ID: {c}")
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def update_test_case(
    project_id: int,
    case_id: int,
    name: Optional[str] = None,
    folder_id: Optional[int] = None,
    state_id: Optional[int] = None,
    estimate: Optional[int] = None
) -> str:
    """
    Update an existing test case's basic fields.

    Args:
        project_id: The project ID.
        case_id: The test case ID to update.
        name: New name for the test case.
        folder_id: New folder ID.
        state_id: New state ID.
        estimate: New estimate in minutes (e.g., 60 for 1 hour, 480 for 8 hours/1 day).
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases"
    # Bulk-update shape: "ids" selects the cases, sibling keys are the new values.
    payload = {"ids": [case_id]}
    if name:
        payload["name"] = name
    if folder_id is not None:
        payload["folder_id"] = folder_id
    if state_id is not None:
        payload["state_id"] = state_id
    if estimate is not None:
        payload["estimate"] = estimate

    async with httpx.AsyncClient() as client:
        try:
            response = await client.patch(url, json=payload, headers=get_headers())
            response.raise_for_status()
            results = response.json().get("result", [])
            if results:
                result = results[0]
                return f"Success! Test Case Updated.\nID: {result.get('id')}\nName: {result.get('name')}"
            return "Test case updated but no result returned."
        except httpx.HTTPStatusError as e:
            return f"Failed to update test case: {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def delete_test_cases(project_id: int, case_ids: str) -> str:
    """
    Delete one or more test cases.

    Args:
        project_id: The project ID.
        case_ids: Comma-separated list of case IDs to delete.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    try:
        # 'cid' avoids shadowing the builtin id()
        ids = [int(cid.strip()) for cid in case_ids.split(",")]
    except ValueError:
        return "Invalid case_ids format. Please provide comma-separated integers."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases"
    payload = {"ids": ids}

    async with httpx.AsyncClient() as client:
        try:
            # httpx has no .delete() with a body; use .request() to send JSON with DELETE
            response = await client.request("DELETE", url, json=payload, headers=get_headers())
            response.raise_for_status()
            return f"Success! Deleted {len(ids)} test case(s)."
        except httpx.HTTPStatusError as e:
            return f"Failed to delete test cases: {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def create_manual_test_run(
    project_id: int,
    name: str,
    description: Optional[str] = None,
    milestone_id: Optional[int] = None
) -> str:
    """
    Create a new Manual Test Run to execute tests.

    NOTE: The Testmo REST API v1 does not support creating manual test runs via API.
    Manual test runs must be created through the Testmo web UI.
    This function is included for future API versions that may support this feature.
    """
    # Arguments are intentionally unused: the endpoint does not exist in API v1.
    return """Error: The Testmo REST API v1 does not support creating manual test runs.

Manual test runs can only be created through the Testmo web interface:
1. Go to your project in Testmo
2. Click on "Test Runs"
3. Click "Add Test Run" button
4. Configure and create your test run

For automation runs, use the Testmo CLI tool: https://docs.testmo.com/docs/automation/cli"""


@mcp.tool()
async def list_test_runs(project_id: int, limit: int = 25) -> str:
    """
    List test runs in a project.

    Args:
        project_id: The project ID.
        limit: Maximum number of runs to return.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/runs?limit={limit}"
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            runs = data.get("result", [])
            if not runs:
                return "No test runs found."
            output = ["Test Runs:"]
            output.append("-" * 50)
            for r in runs:
                output.append(f"ID: {r['id']} | Name: {r['name']} | Status: {r.get('status_id')}")
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def get_milestones(project_id: int) -> str:
    """
    Get milestones for a project. Use milestone IDs when creating test runs.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/milestones"
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            milestones = data.get("result", [])
            if not milestones:
                return "No milestones found in this project."

            output = [f"Milestones for Project {project_id}:"]
            output.append("-" * 50)
            for m in milestones:
                completed = " (Completed)" if m.get('is_completed') else ""
                output.append(f"ID: {m['id']} | Name: {m['name']}{completed}")
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            return f"API Error: {e.response.status_code} - {e.response.text}"
        except Exception as e:
            return f"Error: {str(e)}"


@mcp.tool()
async def debug_api_test(project_id: int) -> str:
    """
    Debug tool to test API connectivity and retrieve template/field information.
    Use this to troubleshoot issues with test case creation.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    output = ["=== Testmo API Debug Information ===\n"]
    output.append(f"Base URL: {TESTMO_BASE_URL}")
    output.append(f"API Endpoint: {clean_url(TESTMO_BASE_URL)}")
    output.append("")

    async with httpx.AsyncClient() as client:
        # Test basic connectivity
        try:
            url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}"
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            project = response.json().get("result", {})
            output.append(f"✓ Project found: {project.get('name')} (ID: {project_id})")
        except Exception as e:
            output.append(f"✗ Project access failed: {str(e)}")
            # No point continuing if the project itself is unreachable
            return "\n".join(output)

        # Get templates
        try:
            url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases?expands=templates&limit=1"
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            templates = data.get("expands", {}).get("templates", [])
            output.append(f"\n✓ Templates found: {len(templates)}")
            for t in templates:
                default = " [DEFAULT]" if t.get('is_default') else ""
                output.append(f"  - ID {t['id']}: {t['name']}{default}")
        except Exception as e:
            output.append(f"✗ Template fetch failed: {str(e)}")

        # Get folders
        try:
            url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/folders"
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            folders = response.json().get("result", [])
            output.append(f"\n✓ Folders found: {len(folders)}")
            for f in folders[:5]:  # Show first 5
                output.append(f"  - ID {f['id']}: {f['name']}")
            if len(folders) > 5:
                output.append(f"  ... and {len(folders) - 5} more")
        except Exception as e:
            output.append(f"✗ Folder fetch failed: {str(e)}")

        output.append("\n=== Recommendations ===")
        output.append("1. Use get_project_templates to find the 'Case (steps)' template ID")
        output.append("2. Always specify template_id when creating test cases with steps")
        output.append("3. Steps should be formatted as: '1. Step | Expected: Result'")

        # Get a sample test case to see actual field structure
        try:
            url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases?limit=1&sort=repository_cases:id&order=desc"
            response = await client.get(url, headers=get_headers())
            response.raise_for_status()
            data = response.json()
            cases = data.get("result", [])
            if cases and isinstance(cases[0], dict):
                output.append("\n=== Sample Test Case Structure ===")
                sample_case = cases[0]
                # Show all custom fields
                custom_fields = {k: v for k, v in sample_case.items() if k.startswith('custom_')}
                if custom_fields:
                    output.append("Custom fields found in existing case:")
                    for k, v in custom_fields.items():
                        output.append(f"  {k}: {type(v).__name__} = {json.dumps(v)[:100]}")
        except Exception as e:
            output.append(f"\n✗ Sample case fetch failed: {str(e)}")

    return "\n".join(output)


@mcp.tool()
async def create_test_case_raw(
    project_id: int,
    payload_json: str
) -> str:
    """
    EXPERIMENTAL: Create a test case with a raw JSON payload.
    Use this to experiment with different field names and formats.

    Args:
        project_id: The project ID.
        payload_json: Raw JSON object for the test case. Must include at least 'name'.

    Example payload_json:
    '{
        "name": "Test Case Title",
        "template_id": 2,
        "folder_id": 17,
        "custom_description": "Description text",
        "custom_steps": [{"content": "Step 1", "expected": "Result 1"}]
    }'

    This tool will wrap your payload in the required {"cases": [...]} structure.
    """
    if not TESTMO_BASE_URL:
        return "Error: TESTMO_BASE_URL is not configured."

    try:
        payload = json.loads(payload_json)
    except json.JSONDecodeError as e:
        return f"Invalid JSON payload: {str(e)}"

    if "name" not in payload:
        return "Error: payload must include 'name' field"

    url = f"{clean_url(TESTMO_BASE_URL)}/projects/{project_id}/cases"
    request_body = {"cases": [payload]}

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(url, json=request_body, headers=get_headers())
            response.raise_for_status()
            results = response.json().get("result", [])
            output = []
            if results and len(results) > 0:
                result = results[0]
                case_id = result.get('id')
                output.append(f"Success! Test Case Created.")
                output.append(f"ID: {case_id}")
                output.append(f"Link: {TESTMO_BASE_URL}/cases/view/{case_id}")
                output.append(f"\nResponse data:")
                output.append(json.dumps(result, indent=2))
            else:
                output.append("Test case created but no result returned.")
            output.append(f"\n--- Request sent ---")
            output.append(json.dumps(request_body, indent=2))
            return "\n".join(output)
        except httpx.HTTPStatusError as e:
            error_output = [f"Failed to create test case: {e.response.text}"]
            error_output.append(f"\n--- Request sent ---")
            error_output.append(json.dumps(request_body, indent=2))
            return "\n".join(error_output)
        except Exception as e:
            return f"Error: {str(e)}"


if __name__ == "__main__":
    mcp.run()

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/filipljoljic/Testmo-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server