MCP Wikipedia Server

by kaman05010
run_tests.py (17.1 kB)
#!/usr/bin/env python3
"""
Test Runner for MCP Wikipedia Server.

This script provides a unified interface to run all tests in the project,
from unit tests to integration and performance tests.
"""

import asyncio
import sys
import os
import subprocess
import time
import argparse
from typing import Dict, Any, List, Optional

# Add src to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))


class TestRunner:
    """Unified test runner for all Wikipedia server tests."""

    def __init__(self):
        self.test_directory = os.path.dirname(__file__)
        self.project_root = os.path.dirname(self.test_directory)
        self.results = {}

    def run_pytest_tests(self, test_file: str = None, verbose: bool = True) -> Dict[str, Any]:
        """Run pytest-based unit tests."""
        print(f"\n🧪 Running Unit Tests (pytest)...")

        try:
            # Check if pytest is available
            subprocess.run(["python", "-m", "pytest", "--version"],
                           capture_output=True, check=True)
        except (subprocess.CalledProcessError, FileNotFoundError):
            print("⚠️ pytest not available. Install with: pip install pytest")
            return {"success": False, "error": "pytest not available"}

        # Build pytest command
        cmd = ["python", "-m", "pytest"]

        if test_file:
            cmd.append(os.path.join(self.test_directory, test_file))
        else:
            cmd.append(os.path.join(self.test_directory, "test_server.py"))

        if verbose:
            cmd.extend(["-v", "-s"])

        # Add coverage if available
        try:
            subprocess.run(["python", "-m", "pytest_cov", "--version"],
                           capture_output=True, check=True)
            cmd.extend(["--cov=src", "--cov-report=term-missing"])
        except (subprocess.CalledProcessError, FileNotFoundError):
            pass  # Coverage not available, continue without it

        print(f"Running: {' '.join(cmd)}")
        start_time = time.time()

        result = subprocess.run(cmd, capture_output=True, text=True, cwd=self.project_root)

        duration = time.time() - start_time
        success = result.returncode == 0

        print(f"\n{'✅' if success else '❌'} Unit Tests: {'PASSED' if success else 'FAILED'}")
        print(f"Duration: {duration:.2f}s")

        if not success:
            print("\nSTDOUT:")
            print(result.stdout)
            print("\nSTDERR:")
            print(result.stderr)

        return {
            "success": success,
            "duration": duration,
            "stdout": result.stdout,
            "stderr": result.stderr,
            "returncode": result.returncode
        }

    async def run_performance_tests(self) -> Dict[str, Any]:
        """Run performance benchmarks."""
        print(f"\n⚡ Running Performance Tests...")

        try:
            # Import and run performance tests
            from test_performance import run_performance_benchmarks

            start_time = time.time()
            await run_performance_benchmarks()
            duration = time.time() - start_time

            print(f"\n✅ Performance Tests: COMPLETED")
            print(f"Duration: {duration:.2f}s")

            return {
                "success": True,
                "duration": duration
            }
        except Exception as e:
            print(f"\n❌ Performance Tests: FAILED")
            print(f"Error: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    async def run_integration_tests(self) -> Dict[str, Any]:
        """Run integration tests."""
        print(f"\n🔗 Running Integration Tests...")

        try:
            # Import and run integration tests
            from test_integration import run_integration_tests

            start_time = time.time()
            results = await run_integration_tests()
            duration = time.time() - start_time

            success = results.get("overall_success", False)

            print(f"\n{'✅' if success else '❌'} Integration Tests: {'PASSED' if success else 'FAILED'}")
            print(f"Duration: {duration:.2f}s")

            return {
                "success": success,
                "duration": duration,
                "detailed_results": results
            }
        except Exception as e:
            print(f"\n❌ Integration Tests: FAILED")
            print(f"Error: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    async def run_mcp_compliance_tests(self) -> Dict[str, Any]:
        """Run MCP protocol compliance tests."""
        print(f"\n🔌 Running MCP Compliance Tests...")

        try:
            # Import and run MCP compliance tests
            from test_mcp_compliance import run_mcp_compliance_tests

            start_time = time.time()
            results = await run_mcp_compliance_tests()
            duration = time.time() - start_time

            success = results.get("overall_success", False)

            print(f"\n{'✅' if success else '❌'} MCP Compliance: {'PASSED' if success else 'FAILED'}")
            print(f"Duration: {duration:.2f}s")

            return {
                "success": success,
                "duration": duration,
                "detailed_results": results
            }
        except Exception as e:
            print(f"\n❌ MCP Compliance Tests: FAILED")
            print(f"Error: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    def run_manual_server_test(self) -> Dict[str, Any]:
        """Run a quick manual server test."""
        print(f"\n🔧 Running Manual Server Test...")

        try:
            server_path = os.path.join(self.project_root, "src", "mcp_server", "mcp_server.py")

            # Try to start server with test flag
            result = subprocess.run(
                [sys.executable, server_path, "--test"],
                capture_output=True,
                text=True,
                timeout=30,
                cwd=self.project_root
            )

            success = result.returncode == 0

            print(f"\n{'✅' if success else '❌'} Manual Server Test: {'PASSED' if success else 'FAILED'}")

            if not success:
                print("STDOUT:", result.stdout)
                print("STDERR:", result.stderr)

            return {
                "success": success,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "returncode": result.returncode
            }
        except subprocess.TimeoutExpired:
            print(f"\n⚠️ Manual Server Test: TIMEOUT")
            return {
                "success": False,
                "error": "Test timed out after 30 seconds"
            }
        except Exception as e:
            print(f"\n❌ Manual Server Test: FAILED")
            print(f"Error: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    def check_dependencies(self) -> Dict[str, Any]:
        """Check if all required dependencies are available."""
        print(f"\n🔍 Checking Dependencies...")

        dependencies = {
            "python": {"cmd": ["python", "--version"], "required": True},
            "wikipedia": {"cmd": ["python", "-c", "import wikipedia; print('OK')"], "required": True},
            "mcp": {"cmd": ["python", "-c", "import mcp; print('OK')"], "required": True},
            "fastmcp": {"cmd": ["python", "-c", "import fastmcp; print('OK')"], "required": True},
            "pytest": {"cmd": ["python", "-m", "pytest", "--version"], "required": False},
            "pytest-cov": {"cmd": ["python", "-m", "pytest_cov", "--version"], "required": False},
            "asyncio": {"cmd": ["python", "-c", "import asyncio; print('OK')"], "required": True}
        }

        results = {}
        all_required_available = True

        for name, config in dependencies.items():
            try:
                result = subprocess.run(
                    config["cmd"],
                    capture_output=True,
                    text=True,
                    timeout=10
                )
                available = result.returncode == 0
                results[name] = {
                    "available": available,
                    "required": config["required"],
                    "version": result.stdout.strip() if available else None
                }

                if config["required"] and not available:
                    all_required_available = False

                status = "✅" if available else ("❌" if config["required"] else "⚠️")
                req_text = " (required)" if config["required"] else " (optional)"
                print(f" {status} {name}{req_text}")

            except Exception as e:
                results[name] = {
                    "available": False,
                    "required": config["required"],
                    "error": str(e)
                }
                if config["required"]:
                    all_required_available = False

                status = "❌" if config["required"] else "⚠️"
                req_text = " (required)" if config["required"] else " (optional)"
                print(f" {status} {name}{req_text} - Error: {e}")

        print(f"\n{'✅' if all_required_available else '❌'} Dependencies: {'All required available' if all_required_available else 'Missing required dependencies'}")

        return {
            "all_required_available": all_required_available,
            "dependencies": results
        }

    async def run_all_tests(self,
                            include_unit: bool = True,
                            include_integration: bool = True,
                            include_performance: bool = True,
                            include_mcp: bool = True,
                            include_manual: bool = True) -> Dict[str, Any]:
        """Run all specified test suites."""
        print("🚀 Starting Comprehensive Test Suite")
        print("="*50)

        overall_start = time.time()
        test_results = {}

        # Check dependencies first
        dep_results = self.check_dependencies()
        test_results["dependencies"] = dep_results

        if not dep_results["all_required_available"]:
            print("\n❌ Cannot run tests: Missing required dependencies")
            return {
                "overall_success": False,
                "results": test_results,
                "error": "Missing required dependencies"
            }

        # Run manual server test first (quick validation)
        if include_manual:
            manual_results = self.run_manual_server_test()
            test_results["manual"] = manual_results

        # Run unit tests
        if include_unit:
            unit_results = self.run_pytest_tests()
            test_results["unit"] = unit_results

        # Run integration tests
        if include_integration:
            integration_results = await self.run_integration_tests()
            test_results["integration"] = integration_results

        # Run MCP compliance tests
        if include_mcp:
            mcp_results = await self.run_mcp_compliance_tests()
            test_results["mcp_compliance"] = mcp_results

        # Run performance tests last (they take the longest)
        if include_performance:
            performance_results = await self.run_performance_tests()
            test_results["performance"] = performance_results

        overall_duration = time.time() - overall_start

        # Calculate overall success
        test_successes = []
        for test_type, results in test_results.items():
            if test_type == "dependencies":
                continue
            test_successes.append(results.get("success", False))

        overall_success = all(test_successes) if test_successes else False

        return {
            "overall_success": overall_success,
            "overall_duration": overall_duration,
            "tests_run": len(test_successes),
            "tests_passed": sum(test_successes),
            "results": test_results
        }

    def print_final_summary(self, results: Dict[str, Any]):
        """Print final test summary."""
        print("\n" + "="*50)
        print("📊 FINAL TEST SUMMARY")
        print("="*50)

        overall_success = results.get("overall_success", False)
        duration = results.get("overall_duration", 0)
        tests_run = results.get("tests_run", 0)
        tests_passed = results.get("tests_passed", 0)

        print(f"Overall Result: {'✅ SUCCESS' if overall_success else '❌ FAILURE'}")
        print(f"Total Duration: {duration:.2f}s")
        print(f"Tests Passed: {tests_passed}/{tests_run}")

        # Print individual test results
        test_results = results.get("results", {})
        for test_type, test_result in test_results.items():
            if test_type == "dependencies":
                continue
            success = test_result.get("success", False)
            test_duration = test_result.get("duration", 0)
            status = "✅" if success else "❌"
            print(f"{status} {test_type.replace('_', ' ').title()}: {test_duration:.2f}s")

        # Print recommendations
        if not overall_success:
            print("\n💡 Recommendations:")
            for test_type, test_result in test_results.items():
                if test_type == "dependencies":
                    continue
                if not test_result.get("success", True):
                    if "error" in test_result:
                        print(f" • Fix {test_type} error: {test_result['error']}")
                    else:
                        print(f" • Review {test_type} test failures")

        print(f"\n{'🎉 All tests passed! Ready for production.' if overall_success else '⚠️ Some tests need attention before deployment.'}")


async def main():
    """Main function with command line interface."""
    parser = argparse.ArgumentParser(description="Run MCP Wikipedia Server tests")
    parser.add_argument("--unit", action="store_true", help="Run unit tests only")
    parser.add_argument("--integration", action="store_true", help="Run integration tests only")
    parser.add_argument("--performance", action="store_true", help="Run performance tests only")
    parser.add_argument("--mcp", action="store_true", help="Run MCP compliance tests only")
    parser.add_argument("--manual", action="store_true", help="Run manual server test only")
    parser.add_argument("--quick", action="store_true", help="Run quick tests only (unit + manual)")
    parser.add_argument("--all", action="store_true", help="Run all tests (default)")
    parser.add_argument("--deps", action="store_true", help="Check dependencies only")

    args = parser.parse_args()
    runner = TestRunner()

    # Handle special cases
    if args.deps:
        runner.check_dependencies()
        return

    # Determine which tests to run
    if args.quick:
        include_unit = True
        include_integration = False
        include_performance = False
        include_mcp = False
        include_manual = True
    elif any([args.unit, args.integration, args.performance, args.mcp, args.manual]):
        include_unit = args.unit
        include_integration = args.integration
        include_performance = args.performance
        include_mcp = args.mcp
        include_manual = args.manual
    else:
        # Default: run all tests
        include_unit = True
        include_integration = True
        include_performance = True
        include_mcp = True
        include_manual = True

    # Run tests
    results = await runner.run_all_tests(
        include_unit=include_unit,
        include_integration=include_integration,
        include_performance=include_performance,
        include_mcp=include_mcp,
        include_manual=include_manual
    )

    # Print summary
    runner.print_final_summary(results)

    # Exit with appropriate code
    sys.exit(0 if results.get("overall_success", False) else 1)


if __name__ == "__main__":
    asyncio.run(main())
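
The script is normally driven from the command line: `python run_tests.py --quick` (run from the directory containing the script) limits the run to the unit and manual suites, and `--deps` only checks dependencies. For automation, the runner can also be used programmatically. Below is a minimal sketch, assuming run_tests.py is on the import path, that skips the slower performance and MCP compliance suites:

import asyncio
from run_tests import TestRunner  # assumes run_tests.py is importable

async def quick_check():
    runner = TestRunner()
    # Dependencies are still verified first; only the slower suites are skipped.
    results = await runner.run_all_tests(
        include_performance=False,
        include_mcp=False,
    )
    runner.print_final_summary(results)
    return results

if __name__ == "__main__":
    asyncio.run(quick_check())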


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/kaman05010/MCPClientServer'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.