#!/usr/bin/env python3
"""
Test script to validate legendary upgrades integration
"""
import asyncio
import json
import sys
import os
# Make sibling modules (e.g. mcp_server) importable when this script is run
# directly: prepend the directory containing this file to the module search path.
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, _script_dir)
async def test_legendary_capabilities():
    """Run an end-to-end smoke test of the legendary MCP server tools.

    The tool coroutines are imported lazily from ``mcp_server`` inside the
    function so that this module can be loaded even when ``mcp_server`` is
    not importable; the import error then surfaces when the test runs.

    Seven checks run in sequence:

    1. ``list_capabilities``                 -- fatal: returns False on failure
    2. ``get_system_status``                 -- fatal: returns False on failure
    3. ``autonomous_architect``              -- non-fatal: logged, run continues
    4. ``proactive_quality_assurance``       -- non-fatal
    5. ``evolutionary_prompt_optimization``  -- non-fatal
    6. ``last_mile_cloud_deployment``        -- non-fatal
    7. ``legendary_generate_application``    -- non-fatal

    Returns:
        bool: ``False`` when one of the two fatal checks fails, ``True``
        otherwise (even if some non-fatal agent checks failed -- they only
        log their errors, matching the original script's behavior).
    """
    print("[*] Testing Legendary MCP Server Integration")
    print("=" * 50)

    # Lazy import: see docstring. All tools are async callables.
    from mcp_server import (
        list_capabilities, get_system_status, autonomous_architect,
        proactive_quality_assurance, evolutionary_prompt_optimization,
        last_mile_cloud_deployment, legendary_generate_application
    )

    # Test 1: List capabilities (fatal on failure).
    print("\n[1] Testing list_capabilities...")
    try:
        capabilities = await list_capabilities()
        print("[OK] Capabilities loaded successfully")
        print(f" - Standard agents: {len(capabilities.get('standard_agents', {}))}")
        print(f" - Legendary agents: {len(capabilities.get('legendary_agents', {}))}")
        print(f" - Revolutionary features: {len(capabilities.get('revolutionary_features', {}))}")
        print(f" - Legendary tools: {len(capabilities.get('legendary_tools', []))}")
        print(f" - Innovation level: {capabilities.get('innovation_level', 'Unknown')}")
    except Exception as e:
        print(f"[FAIL] Capabilities test failed: {e}")
        return False

    # Test 2: System status with legendary upgrades (fatal on failure).
    print("\n[2] Testing enhanced system status...")
    try:
        status = await get_system_status()
        print("[OK] System status retrieved successfully")
        print(f" - Server: {status.get('server')}")
        print(f" - Revolutionary capabilities: {status.get('revolutionary_capabilities')}")
        print(f" - Legendary agents available: {bool(status.get('legendary_agents'))}")
    except Exception as e:
        print(f"[FAIL] System status test failed: {e}")
        return False

    # Test 3: Autonomous Architect (non-fatal from here on: failures are
    # reported but do not abort the remaining checks).
    print("\n[3] Testing Autonomous Architect...")
    try:
        result = await autonomous_architect(
            project_goals=["Create a scalable web application", "Implement AI-driven features"],
            constraints=["Budget: $10k", "Timeline: 3 months"],
            learning_objectives=["Improve architecture patterns", "Optimize performance"]
        )
        print("[OK] Autonomous Architect test successful")
        print(f" - Success: {result.get('success')}")
        if result.get('success'):
            print(f" - Confidence: {result.get('confidence_score', 0)}")
        else:
            print(f" - Error: {result.get('error', 'Unknown error')}")
    except Exception as e:
        print(f"[FAIL] Autonomous Architect test failed: {e}")

    # Test 4: Proactive Quality Assurance.
    print("\n[4] Testing Proactive Quality Assurance...")
    try:
        result = await proactive_quality_assurance(
            code_context="def hello_world(): print('Hello, World!')",
            quality_standards=["PEP8", "Security"],
            auto_remediation=True
        )
        print("[OK] Proactive Quality test successful")
        print(f" - Success: {result.get('success')}")
        if result.get('success'):
            print(f" - Quality score: {result.get('quality_score', 'N/A')}")
        else:
            print(f" - Error: {result.get('error', 'Unknown error')}")
    except Exception as e:
        print(f"[FAIL] Proactive Quality test failed: {e}")

    # Test 5: Evolutionary Prompt Optimization.
    print("\n[5] Testing Evolutionary Prompt Engine...")
    try:
        result = await evolutionary_prompt_optimization(
            base_prompt="Write a function to calculate fibonacci numbers",
            optimization_goals=["clarity", "effectiveness", "specificity"],
            performance_metrics={"accuracy": 0.85, "response_time": 2.5}
        )
        print("[OK] Evolutionary Prompt test successful")
        print(f" - Success: {result.get('success')}")
        if result.get('success'):
            print(f" - Optimized prompt available: {bool(result.get('optimized_prompt'))}")
        else:
            print(f" - Error: {result.get('error', 'Unknown error')}")
    except Exception as e:
        print(f"[FAIL] Evolutionary Prompt test failed: {e}")

    # Test 6: Last Mile Cloud Deployment.
    print("\n[6] Testing Last Mile Cloud Agent...")
    try:
        result = await last_mile_cloud_deployment(
            application_context="Node.js REST API with PostgreSQL database",
            target_environments=["staging", "production"],
            verification_requirements=["Health checks", "Performance tests", "Security scans"]
        )
        print("[OK] Last Mile Cloud test successful")
        print(f" - Success: {result.get('success')}")
        if result.get('success'):
            print(f" - Deployment plan available: {bool(result.get('deployment_plan'))}")
        else:
            print(f" - Error: {result.get('error', 'Unknown error')}")
    except Exception as e:
        print(f"[FAIL] Last Mile Cloud test failed: {e}")

    # Test 7: Revolutionary Application Generation (also reports the
    # 'legendary_fallback' field when the tool itself signals failure).
    print("\n[7] Testing Revolutionary Application Generation...")
    try:
        result = await legendary_generate_application(
            description="Build a revolutionary AI-powered social media platform with autonomous content moderation and predictive user engagement",
            complexity_level="revolutionary",
            innovation_requirements=["AI-driven architecture", "Self-improving algorithms", "Autonomous scaling"],
            deployment_strategy="multi-cloud-autonomous"
        )
        print("[OK] Revolutionary Application Generation test successful")
        print(f" - Success: {result.get('success')}")
        if result.get('success'):
            print(f" - Innovation score: {result.get('innovation_score', 0)}")
            print(f" - Legendary agents used: {len(result.get('legendary_agents_used', []))}")
        else:
            print(f" - Error: {result.get('error', 'Unknown error')}")
            print(f" - Fallback: {result.get('legendary_fallback', 'None')}")
    except Exception as e:
        print(f"[FAIL] Revolutionary Application Generation test failed: {e}")

    print("\n[*] Legendary Integration Test Complete!")
    print("=" * 50)
    print("[*] Ready for revolutionary deployment!")
    return True
# Script entry point: run the async integration test to completion.
# NOTE(review): the boolean result is discarded here, so the process exit
# code is 0 even when the fatal checks fail -- confirm whether callers
# (e.g. CI) rely on the exit status.
if __name__ == "__main__":
    asyncio.run(test_legendary_capabilities())