# test_setup.py
#!/usr/bin/env python3.11
"""
Comprehensive test script for GCP MCP Server setup and functionality.
"""
import asyncio
import json
import os
import sys
import tempfile
import traceback
from pathlib import Path
from unittest.mock import Mock, patch
# Add the current directory to Python path
sys.path.insert(0, str(Path(__file__).parent))
def test_imports():
    """Check that the gcp_mcp package and all its key classes import cleanly.

    Returns True when every import succeeds, False (after printing the
    offending error) on the first failure.
    """
    print("🔄 Testing imports...")

    # Each loader performs one import step; defined as nested functions so
    # the ImportError surfaces only when the step actually runs.
    def _load_package():
        import gcp_mcp  # noqa: F401

    def _load_server():
        from gcp_mcp.server import GCPMCPServer  # noqa: F401

    def _load_config():
        from gcp_mcp.config import Config  # noqa: F401

    def _load_tools():
        from gcp_mcp.tools.logging_tools import LoggingTools  # noqa: F401
        from gcp_mcp.tools.enterprise_logging_tools import EnterpriseLoggingTools  # noqa: F401
        from gcp_mcp.tools.monitoring_tools import MonitoringTools  # noqa: F401
        from gcp_mcp.tools.enterprise_monitoring_tools import EnterpriseMonitoringTools  # noqa: F401

    steps = (
        (_load_package, "✅ gcp_mcp package imported successfully", "gcp_mcp"),
        (_load_server, "✅ GCPMCPServer imported successfully", "GCPMCPServer"),
        (_load_config, "✅ Config imported successfully", "Config"),
        (_load_tools, "✅ All tool classes imported successfully", "tool classes"),
    )
    for loader, ok_msg, label in steps:
        try:
            loader()
        except ImportError as e:
            print(f"❌ Failed to import {label}: {e}")
            return False
        print(ok_msg)
    return True
def test_config_loading():
    """Test configuration loading functionality.

    Exercises the default Config values and loading from the GCP_PROJECT
    environment variable. Returns True on success, False (with a printed
    traceback) on any failure.

    Fix: the original deleted GCP_PROJECT only on the success path, so a
    failing assertion leaked the variable into every later test, and any
    pre-existing value was destroyed. The variable is now saved and restored
    in a try/finally.
    """
    print("\n🔄 Testing configuration loading...")
    try:
        from gcp_mcp.config import Config

        # Default config should carry the documented default limits.
        config = Config()
        print("✅ Default config created successfully")
        assert config.max_results == 1000
        print("✅ Default config values are correct")

        # Config loading from environment variables, with guaranteed cleanup.
        previous = os.environ.get("GCP_PROJECT")
        os.environ["GCP_PROJECT"] = "test-project"
        try:
            config = Config.load()
            assert config.default_project == "test-project"
            print("✅ Environment variable config loading works")
        finally:
            # Restore the caller's environment exactly as it was.
            if previous is None:
                del os.environ["GCP_PROJECT"]
            else:
                os.environ["GCP_PROJECT"] = previous
        return True
    except Exception as e:
        print(f"❌ Config loading failed: {e}")
        traceback.print_exc()
        return False
def test_validation():
    """Exercise the project-ID and time-range validation helpers."""
    print("\n🔄 Testing validation...")
    try:
        from gcp_mcp.validation import ProjectValidator, TimeRangeValidator

        # A well-formed project id should come back unchanged.
        checked = ProjectValidator.validate_project_id("valid-project-123")
        assert checked == "valid-project-123"
        print("✅ Valid project ID validation works")

        # An uppercase id violates GCP naming rules and must be rejected.
        rejected = False
        try:
            ProjectValidator.validate_project_id("INVALID")
        except Exception:
            rejected = True
        if not rejected:
            print("❌ Should have failed for invalid project ID")
            return False
        print("✅ Invalid project ID correctly rejected")

        # A relative time range should round-trip through the validator.
        window = TimeRangeValidator.validate_time_range("1h")
        assert window["start"] == "1h"
        print("✅ Time range validation works")
        return True
    except Exception as e:
        print(f"❌ Validation testing failed: {e}")
        traceback.print_exc()
        return False
def test_cache():
    """Test caching functionality.

    Runs all cache operations inside a single event loop. The original
    called asyncio.run() once per operation, which creates a brand-new loop
    each time and breaks caches that hold loop-bound primitives (e.g. an
    internal asyncio.Lock created on first use). Returns True on success,
    False (with a printed traceback) otherwise.
    """
    print("\n🔄 Testing cache...")
    try:
        from gcp_mcp.cache import EnterpriseCache

        cache = EnterpriseCache(max_size_mb=1, default_ttl=60)

        async def _exercise():
            # Basic set/get round-trip.
            await cache.set("test_key", "test_value")
            hit = await cache.get("test_key")
            assert hit == "test_value"
            print("✅ Cache set/get operations work")
            # Missing keys must yield None rather than raising.
            miss = await cache.get("non_existent")
            assert miss is None
            print("✅ Cache miss handling works")
            # Statistics must expose hit/miss counters.
            stats = await cache.get_stats()
            assert "hits" in stats
            assert "misses" in stats
            print("✅ Cache statistics work")

        asyncio.run(_exercise())
        return True
    except Exception as e:
        print(f"❌ Cache testing failed: {e}")
        traceback.print_exc()
        return False
def test_tool_creation():
    """Confirm the tool classes instantiate and expose the expected tool lists."""
    print("\n🔄 Testing tool creation...")
    try:
        from gcp_mcp.config import Config
        from gcp_mcp.auth import GCPAuthenticator
        from gcp_mcp.tools.logging_tools import LoggingTools
        from gcp_mcp.tools.enterprise_logging_tools import EnterpriseLoggingTools

        # Stub the authenticator so no real GCP credentials are required.
        config = Config()
        fake_auth = Mock(spec=GCPAuthenticator)
        fake_auth.get_project_id.return_value = "test-project"

        # Basic logging tools: exactly four tools expected.
        basic = LoggingTools(fake_auth, config)
        basic_list = asyncio.run(basic.get_tools())
        assert len(basic_list) == 4
        print("✅ Basic logging tools created successfully")

        # Enterprise logging tools: exactly six tools expected.
        enterprise = EnterpriseLoggingTools(fake_auth, config)
        enterprise_list = asyncio.run(enterprise.get_tools())
        assert len(enterprise_list) == 6
        print("✅ Enterprise logging tools created successfully")

        # Every basic tool must carry the standard MCP attributes.
        for tool in basic_list:
            for attr in ('name', 'description', 'inputSchema'):
                assert hasattr(tool, attr)
            print(f"✅ Tool '{tool.name}' has valid schema")
        return True
    except Exception as e:
        print(f"❌ Tool creation failed: {e}")
        traceback.print_exc()
        return False
def test_server_initialization():
    """Initialize the server against a throwaway config, mocking all GCP clients."""
    print("\n🔄 Testing server initialization...")
    try:
        from gcp_mcp.server import GCPMCPServer

        # Minimal config: caching and rate limiting disabled so that
        # initialization stays side-effect free.
        config_data = {
            "default_project": "test-project",
            "authentication": {"method": "application_default"},
            "logging": {"level": "DEBUG", "format": "text"},
            "cache": {"enabled": False},
            "rate_limiting": {"enabled": False}
        }
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as handle:
            json.dump(config_data, handle)
            config_path = handle.name
        try:
            # Patch every GCP client factory so nothing touches the network.
            with patch('gcp_mcp.auth.default') as mock_default, \
                 patch('gcp_mcp.auth.cloud_logging.Client'), \
                 patch('gcp_mcp.auth.monitoring_v3.MetricServiceClient'):
                mock_default.return_value = (Mock(), "test-project")
                server = GCPMCPServer(config_path)
                print("✅ Server initialized successfully")
                assert server.config.default_project == "test-project"
                print("✅ Server config loaded correctly")
                assert server.authenticator is not None
                print("✅ Authenticator created")
                assert server.logging_tools is not None
                print("✅ Logging tools created")
                assert server.enterprise_logging_tools is not None
                print("✅ Enterprise logging tools created")
        finally:
            # Always remove the temporary config file.
            os.unlink(config_path)
        return True
    except Exception as e:
        print(f"❌ Server initialization failed: {e}")
        traceback.print_exc()
        return False
def test_cli_parsing():
    """Drive the CLI through click's test runner: help text and bad credentials."""
    print("\n🔄 Testing CLI parsing...")
    try:
        from gcp_mcp.cli import main
        from click.testing import CliRunner

        runner = CliRunner()

        # --help must exit cleanly and mention the server.
        help_result = runner.invoke(main, ['--help'])
        assert help_result.exit_code == 0
        assert "GCP MCP server" in help_result.output
        print("✅ CLI help works")

        # A nonexistent credentials path should produce a nonzero exit code
        # rather than a crash.
        bad_result = runner.invoke(main, ['--credentials', '/nonexistent/path.json'])
        assert bad_result.exit_code != 0
        print("✅ CLI handles invalid credentials path")
        return True
    except Exception as e:
        print(f"❌ CLI testing failed: {e}")
        traceback.print_exc()
        return False
def test_mcp_tool_schemas():
    """Test that MCP tool schemas are valid.

    Checks that every tool exposed by LoggingTools carries the fields the MCP
    protocol requires (name, description, inputSchema) and that each
    inputSchema is a JSON-schema object. Returns True on success, False
    otherwise.

    Fix: removed the redundant local "from unittest.mock import Mock" — Mock
    is already imported at module level.
    """
    print("\n🔄 Testing MCP tool schemas...")
    try:
        from gcp_mcp.tools.logging_tools import LoggingTools
        from gcp_mcp.config import Config
        from gcp_mcp.auth import GCPAuthenticator

        # Stub authenticator so no GCP calls happen.
        config = Config()
        authenticator = Mock(spec=GCPAuthenticator)
        authenticator.get_project_id.return_value = "test-project"
        logging_tools = LoggingTools(authenticator, config)
        tools = asyncio.run(logging_tools.get_tools())
        for tool in tools:
            # Verify required MCP tool fields.
            assert hasattr(tool, 'name'), f"Tool missing 'name': {tool}"
            assert hasattr(tool, 'description'), f"Tool missing 'description': {tool}"
            assert hasattr(tool, 'inputSchema'), f"Tool missing 'inputSchema': {tool}"
            # Verify the schema is a JSON-schema object.
            schema = tool.inputSchema
            assert isinstance(schema, dict), f"Tool {tool.name} schema is not a dict"
            assert schema.get('type') == 'object', f"Tool {tool.name} schema type is not 'object'"
            print(f"✅ Tool '{tool.name}' has valid MCP schema")
        return True
    except Exception as e:
        print(f"❌ MCP schema validation failed: {e}")
        traceback.print_exc()
        return False
def test_sample_credentials_extraction():
    """Round-trip a fake service-account file and extract its project_id.

    Mirrors the CLI's credential-reading logic against a temp JSON file.
    """
    print("\n🔄 Testing credentials project extraction...")
    try:
        # Service-account-shaped payload; the key material is obviously fake.
        sample_creds = {
            "type": "service_account",
            "project_id": "sample-project-123",
            "private_key_id": "key_id",
            "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQ...\n-----END PRIVATE KEY-----\n",
            "client_email": "test@sample-project-123.iam.gserviceaccount.com",
            "client_id": "123456789",
            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
            "token_uri": "https://oauth2.googleapis.com/token"
        }
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as handle:
            json.dump(sample_creds, handle)
            creds_path = handle.name
        try:
            # Read it back the way the CLI does and pull out the project id.
            with open(creds_path) as handle:
                loaded = json.load(handle)
            extracted = loaded.get("project_id")
            assert extracted == "sample-project-123"
            print("✅ Project ID extraction from credentials works")
        finally:
            os.unlink(creds_path)
        return True
    except Exception as e:
        print(f"❌ Credentials extraction test failed: {e}")
        traceback.print_exc()
        return False
def test_error_handling():
    """Verify the custom exception hierarchy and that validation raises it."""
    print("\n🔄 Testing error handling...")
    try:
        from gcp_mcp.exceptions import ValidationError, GCPServiceError
        from gcp_mcp.validation import ProjectValidator

        # An empty project id must raise ValidationError specifically; any
        # other exception type falls through to the outer handler.
        raised = False
        try:
            ProjectValidator.validate_project_id("")
        except ValidationError:
            raised = True
        if not raised:
            print("❌ Should have raised ValidationError for empty project ID")
            return False
        print("✅ ValidationError properly raised for invalid input")

        # Both custom exceptions must derive from Exception.
        assert issubclass(ValidationError, Exception)
        assert issubclass(GCPServiceError, Exception)
        print("✅ Custom exception classes are properly defined")
        return True
    except Exception as e:
        print(f"❌ Error handling test failed: {e}")
        traceback.print_exc()
        return False
def run_all_tests():
    """Run every test function in order and report an overall pass/fail.

    Returns True only when all tests pass; a test raising an exception
    counts as a failure rather than aborting the suite.
    """
    print("🧪 Running GCP MCP Server Test Suite")
    print("=====================================\n")
    tests = [
        ("Import Tests", test_imports),
        ("Config Loading", test_config_loading),
        ("Validation", test_validation),
        ("Cache Functionality", test_cache),
        ("Tool Creation", test_tool_creation),
        ("Server Initialization", test_server_initialization),
        ("CLI Parsing", test_cli_parsing),
        ("MCP Tool Schemas", test_mcp_tool_schemas),
        ("Credentials Extraction", test_sample_credentials_extraction),
        ("Error Handling", test_error_handling),
    ]

    # Collect one boolean per test; counts are derived afterwards.
    outcomes = []
    for test_name, test_func in tests:
        print(f"\n📋 {test_name}")
        print("-" * 50)
        try:
            ok = bool(test_func())
        except Exception as e:
            # A crashing test is reported but must not stop the suite.
            print(f"❌ {test_name} FAILED with exception: {e}")
            traceback.print_exc()
            outcomes.append(False)
            continue
        outcomes.append(ok)
        print(f"✅ {test_name} PASSED" if ok else f"❌ {test_name} FAILED")

    passed = sum(outcomes)
    failed = len(outcomes) - passed
    print(f"\n📊 Test Results")
    print("=" * 50)
    print(f"✅ Passed: {passed}")
    print(f"❌ Failed: {failed}")
    print(f"📈 Success Rate: {passed/(passed+failed)*100:.1f}%")

    if failed == 0:
        print("\n🎉 All tests passed! The GCP MCP Server is ready to use.")
        print("\n🔗 Next steps:")
        print("1. Get your GCP service account credentials")
        print("2. Run: python -m gcp_mcp.cli --credentials /path/to/credentials.json")
        print("3. Add to Claude Code configuration")
        return True
    print(f"\n⚠️ {failed} test(s) failed. Please check the issues above.")
    return False
if __name__ == "__main__":
    # Exit status mirrors the suite outcome: 0 when every test passed,
    # 1 otherwise, so CI can gate on this script directly.
    sys.exit(0 if run_all_tests() else 1)