
Katamari MCP Server

by ciphernaut
testing.py • 12.7 kB
""" Parallel testing environment for safe ACP operations. """ import asyncio import logging import tempfile import shutil from typing import Dict, List, Optional, Any from pathlib import Path logger = logging.getLogger(__name__) class ParallelTester: """Parallel testing environment for safe capability testing.""" def __init__(self): self.test_environments: Dict[str, Path] = {} self.active_tests: Dict[str, asyncio.Task] = {} async def create_sandbox(self, test_id: str, environment_type: str = "minimal") -> str: """Create isolated environment for testing.""" sandbox_path = Path(tempfile.mkdtemp(prefix=f"acp_test_{test_id}_")) try: # Copy necessary files to sandbox if environment_type == "minimal": await self._setup_minimal_sandbox(sandbox_path) elif environment_type == "realistic": await self._setup_realistic_sandbox(sandbox_path) elif environment_type == "comprehensive": await self._setup_comprehensive_sandbox(sandbox_path) self.test_environments[test_id] = sandbox_path logger.info(f"Created sandbox environment: {sandbox_path}") return str(sandbox_path) except Exception as e: # Cleanup on failure if sandbox_path.exists(): shutil.rmtree(sandbox_path) raise e async def test_capability(self, capability: Dict[str, Any]) -> Dict[str, Any]: """Test new or modified capability in isolation.""" test_id = capability.get("name", "unknown") try: # Create sandbox sandbox_path_str = await self.create_sandbox(test_id, "realistic") sandbox_path = Path(sandbox_path_str) # Deploy capability to sandbox deploy_result = await self._deploy_to_sandbox(capability, sandbox_path) if not deploy_result["success"]: return { "success": False, "issues": [f"Deployment failed: {deploy_result['error']}"], "test_id": test_id } # Run tests test_results = await self._run_capability_tests(capability, sandbox_path) # Cleanup await self._cleanup_sandbox(test_id) return { "success": test_results["all_passed"], "issues": test_results["failed_tests"], "test_id": test_id, "test_results": test_results } except Exception as e: await self._cleanup_sandbox(test_id) return { "success": False, "issues": [f"Test execution failed: {str(e)}"], "test_id": test_id } async def validate_core_changes(self, changes: Dict[str, Any]) -> Dict[str, Any]: """Test core router and security changes.""" test_id = f"core_{changes.get('target', 'unknown')}" try: # Create comprehensive sandbox for core changes sandbox_path_str = await self.create_sandbox(test_id, "comprehensive") sandbox_path = Path(sandbox_path_str) # Apply core changes apply_result = await self._apply_core_changes(changes, sandbox_path) if not apply_result["success"]: return { "success": False, "issues": [f"Failed to apply core changes: {apply_result['error']}"], "test_id": test_id } # Run comprehensive tests test_results = await self._run_core_tests(changes, sandbox_path) # Cleanup await self._cleanup_sandbox(test_id) return { "success": test_results["all_passed"], "issues": test_results["failed_tests"], "test_id": test_id, "test_results": test_results } except Exception as e: await self._cleanup_sandbox(test_id) return { "success": False, "issues": [f"Core validation failed: {str(e)}"], "test_id": test_id } async def promote_to_production(self, test_id: str, changes: Dict[str, Any]) -> bool: """Move tested changes to production environment.""" try: # Additional validation before promotion validation_result = await self._validate_promotion(test_id, changes) if not validation_result["valid"]: logger.error(f"Promotion validation failed: {validation_result['issues']}") return False # Apply 
changes to production promotion_result = await self._apply_production_changes(changes) if promotion_result["success"]: logger.info(f"Successfully promoted changes from test {test_id}") return True else: logger.error(f"Promotion failed: {promotion_result['error']}") return False except Exception as e: logger.error(f"Promotion error: {str(e)}") return False async def _setup_minimal_sandbox(self, sandbox_path: Path): """Setup minimal sandbox environment.""" # Create basic directory structure (sandbox_path / "capabilities").mkdir(exist_ok=True) (sandbox_path / "tests").mkdir(exist_ok=True) # Copy minimal configuration config_content = """ # Minimal ACP test environment testing_mode: true capabilities: [] """ (sandbox_path / "config.yaml").write_text(config_content) async def _setup_realistic_sandbox(self, sandbox_path: Path): """Setup realistic sandbox with sample data.""" await self._setup_minimal_sandbox(sandbox_path) # Add sample capabilities sample_cap = """ # Sample capability for testing async def sample_capability(): return "test_result" """ (sandbox_path / "capabilities" / "sample.py").write_text(sample_cap) # Add test data test_data = {"test_queries": ["hello", "world", "test"]} (sandbox_path / "test_data.json").write_text(str(test_data)) async def _setup_comprehensive_sandbox(self, sandbox_path: Path): """Setup comprehensive sandbox with full system clone.""" await self._setup_realistic_sandbox(sandbox_path) # Copy core system files # This would include router, security validator, etc. # For now, create placeholders (sandbox_path / "router").mkdir(exist_ok=True) (sandbox_path / "security").mkdir(exist_ok=True) # Add comprehensive test suite comprehensive_tests = """ # Comprehensive test suite import pytest @pytest.mark.asyncio async def test_system_integrity(): assert True @pytest.mark.asyncio async def test_security_validation(): assert True @pytest.mark.asyncio async def test_router_functionality(): assert True """ (sandbox_path / "tests" / "comprehensive.py").write_text(comprehensive_tests) async def _deploy_to_sandbox(self, capability: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Deploy capability to sandbox environment.""" try: cap_name = capability.get("name", "unknown") cap_code = capability.get("proposed_code", "") if not cap_code: return {"success": False, "error": "No capability code provided"} # Write capability file cap_file = sandbox_path / "capabilities" / f"{cap_name}.py" cap_file.write_text(cap_code) return {"success": True} except Exception as e: return {"success": False, "error": str(e)} async def _run_capability_tests(self, capability: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Run tests for capability.""" tests = { "basic_functionality": await self._test_basic_functionality(capability, sandbox_path), "error_handling": await self._test_error_handling(capability, sandbox_path), "integration": await self._test_integration(capability, sandbox_path) } all_passed = all(test["passed"] for test in tests.values()) failed_tests = [name for name, test in tests.items() if not test["passed"]] return { "all_passed": all_passed, "failed_tests": failed_tests, "detailed_results": tests } async def _apply_core_changes(self, changes: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Apply core changes to sandbox.""" try: # This would apply changes to router, security, etc. 
# For now, simulate success return {"success": True} except Exception as e: return {"success": False, "error": str(e)} async def _run_core_tests(self, changes: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Run comprehensive core tests.""" # This would run the full test suite # For now, simulate results return { "all_passed": True, "failed_tests": [], "detailed_results": { "system_integrity": {"passed": True}, "security_validation": {"passed": True}, "router_functionality": {"passed": True} } } async def _cleanup_sandbox(self, test_id: str): """Clean up sandbox environment.""" if test_id in self.test_environments: sandbox_path = self.test_environments[test_id] try: if sandbox_path.exists(): shutil.rmtree(sandbox_path) del self.test_environments[test_id] logger.info(f"Cleaned up sandbox: {test_id}") except Exception as e: logger.error(f"Failed to cleanup sandbox {test_id}: {str(e)}") async def _validate_promotion(self, test_id: str, changes: Dict[str, Any]) -> Dict[str, Any]: """Validate changes before promotion to production.""" issues = [] # Check if test was successful # This would check actual test results # For now, assume validation passes return { "valid": len(issues) == 0, "issues": issues } async def _apply_production_changes(self, changes: Dict[str, Any]) -> Dict[str, Any]: """Apply changes to production environment.""" try: # This would apply changes to the actual system # For now, simulate success return {"success": True} except Exception as e: return {"success": False, "error": str(e)} # Test methods async def _test_basic_functionality(self, capability: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Test basic functionality of capability.""" try: # This would actually test the capability # For now, simulate success return {"passed": True, "message": "Basic functionality test passed"} except Exception as e: return {"passed": False, "message": f"Basic functionality test failed: {str(e)}"} async def _test_error_handling(self, capability: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Test error handling of capability.""" try: # This would test error scenarios # For now, simulate success return {"passed": True, "message": "Error handling test passed"} except Exception as e: return {"passed": False, "message": f"Error handling test failed: {str(e)}"} async def _test_integration(self, capability: Dict[str, Any], sandbox_path: Path) -> Dict[str, Any]: """Test integration with other capabilities.""" try: # This would test integration scenarios # For now, simulate success return {"passed": True, "message": "Integration test passed"} except Exception as e: return {"passed": False, "message": f"Integration test failed: {str(e)}"}
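For context, a minimal usage sketch, not part of the repository: it drives ParallelTester end to end with an illustrative capability named "echo". The "name" and "proposed_code" keys mirror what _deploy_to_sandbox() reads; everything else here is assumed for demonstration only.

# Illustrative driver (hypothetical), exercising ParallelTester end to end.
import asyncio

async def main():
    tester = ParallelTester()
    capability = {
        "name": "echo",  # assumed capability name for the example
        "proposed_code": "async def echo(text):\n    return text\n",
    }

    # Run the capability through a realistic sandbox and inspect the verdict.
    result = await tester.test_capability(capability)
    print(result["success"], result["issues"])

    # Promote only if the sandbox run passed.
    if result["success"]:
        promoted = await tester.promote_to_production(result["test_id"], capability)
        print("promoted:", promoted)

if __name__ == "__main__":
    asyncio.run(main())

Note that test_capability() cleans up its sandbox before returning, so promotion relies on _validate_promotion() and _apply_production_changes() rather than on the sandbox still existing.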

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/ciphernaut/katamari-mcp'
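For programmatic access, a minimal Python sketch of the same request using only the standard library; the endpoint URL is taken from the curl example above, and the response is treated as opaque JSON since its exact shape is not documented here.

# Fetch this server's MCP directory entry; the response schema is not assumed.
import json
import urllib.request

url = "https://glama.ai/api/mcp/v1/servers/ciphernaut/katamari-mcp"
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)

print(json.dumps(data, indent=2))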

If you have feedback or need assistance with the MCP directory API, please join our Discord server.