# test_ai_integration.py
#!/usr/bin/env python3
"""
Test script for AI integration with DP-MCP server.
This script tests the AI functionality without requiring API keys,
using mock models and local data processing.
"""
import asyncio
import json
import sys
from pathlib import Path
# Make the in-repo package importable without installation: this script sits
# at the project root, so prepend <root>/src to the import search path.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root / "src"))
async def test_ai_integration():
    """Exercise the DP-MCP AI components end to end using mock/demo models.

    Returns:
        bool: True when every check passes; False when the AI modules cannot
        be imported or any check raises (a traceback is printed in that case).
    """
    print("🧪 Testing DP-MCP AI Integration")
    print("=" * 50)

    try:
        # Test 1: Import AI modules.
        # AIModelManager/ModelConfig/ModelProvider are imported but unused on
        # purpose: the import itself verifies the public surface exists.
        print("\n1️⃣ Testing AI module imports...")
        from dp_mcp.ai.data_privacy import DataPrivacyManager, DataMaskingConfig, PrivacyLevel
        from dp_mcp.ai.models import AIModelManager, ModelConfig, ModelProvider
        from dp_mcp.ai.config_templates import create_demo_setup, validate_setup
        from dp_mcp.ai.ai_tools import AIEnhancedTools
        print("✅ All AI modules imported successfully")

        # Test 2: Data Privacy Manager.
        print("\n2️⃣ Testing Data Privacy Manager...")
        privacy_manager = DataPrivacyManager(DataMaskingConfig(
            privacy_level=PrivacyLevel.MODERATE,
            max_rows=10,
            max_string_length=50
        ))

        # Sensitive-data detection: the email and phone should be masked.
        test_text = "Contact john@example.com or call 555-123-4567"
        sanitized = privacy_manager.sanitize_text(test_text)
        print(f"Original: {test_text}")
        print(f"Sanitized: {sanitized}")

        # Prompt validation: a benign count vs. a stacked-statement DROP.
        safe_query = "SELECT COUNT(*) FROM users"
        unsafe_query = "SELECT * FROM users; DROP TABLE users"
        is_safe, msg = privacy_manager.validate_ai_prompt(safe_query)
        print(f"Safe query validation: {is_safe} - {msg}")
        is_safe, msg = privacy_manager.validate_ai_prompt(unsafe_query)
        print(f"Unsafe query validation: {is_safe} - {msg}")
        print("✅ Data Privacy Manager working correctly")

        # Test 3: AI Model Manager with the demo (mock-model) setup.
        print("\n3️⃣ Testing AI Model Manager...")
        ai_manager = create_demo_setup()
        models = ai_manager.list_models()
        print(f"Available models: {models}")

        if models:
            mock_response = await ai_manager.generate_response(
                "Test prompt for mock model",
                model_name=models[0]
            )
            print(f"Mock AI response: {mock_response[:100]}...")

        status = validate_setup(ai_manager)
        print(f"Setup validation: {status}")
        print("✅ AI Model Manager working correctly")

        # Test 4: AI Enhanced Tools status reporting.
        print("\n4️⃣ Testing AI Enhanced Tools...")
        ai_tools = AIEnhancedTools("demo")
        ai_status = ai_tools.get_ai_status()
        print("AI Tools Status:")
        print(json.dumps(ai_status, indent=2))
        print("✅ AI Enhanced Tools initialized correctly")

        # Test 5: Each privacy level applied to the same PII-bearing rows.
        print("\n5️⃣ Testing Privacy Levels...")
        test_data = [
            {"id": 1, "name": "John Doe", "email": "john@example.com", "phone": "555-123-4567"},
            {"id": 2, "name": "Jane Smith", "email": "jane@example.com", "ssn": "123-45-6789"}
        ]
        for level in [PrivacyLevel.BASIC, PrivacyLevel.STRICT, PrivacyLevel.PARANOID]:
            manager = DataPrivacyManager(DataMaskingConfig(privacy_level=level))
            sanitized = manager.sanitize_query_result(test_data)
            print(f"\n{level.value.upper()} privacy level:")
            print(json.dumps(sanitized, indent=2))
        print("\n✅ Privacy levels working correctly")

        # Test 6: Configuration templates for every supported environment.
        # A failing environment is reported but does not fail the suite.
        print("\n6️⃣ Testing Configuration Templates...")
        from dp_mcp.ai.config_templates import get_setup_for_environment
        for env in ["development", "production", "enterprise", "demo"]:
            try:
                setup = get_setup_for_environment(env)
                env_models = setup.list_models()  # renamed: don't shadow `models` above
                print(f"{env.upper()}: {len(env_models)} models - {env_models}")
            except Exception as e:
                print(f"{env.upper()}: Setup failed - {e}")
        print("✅ Configuration templates working correctly")

        print("\n🎉 ALL TESTS PASSED!")
        print("\nAI Integration Summary:")
        print("- ✅ Data privacy and sanitization working")
        print("- ✅ AI model management functional")
        print("- ✅ Configuration templates available")
        print("- ✅ Mock models ready for testing")
        print("- ✅ Multiple privacy levels supported")
        print("\nTo use with real AI models:")
        print("1. Copy .env.ai.sample to .env.ai")
        print("2. Configure your API keys or local models")
        print("3. Restart the server with --ai-env production")
        print("4. Test with MCP tools: get_ai_system_status")
        return True

    except ImportError as e:
        # Missing dependencies are the common first-run failure; give the fix.
        print(f"❌ Import Error: {e}")
        print("Make sure all dependencies are installed:")
        print("  uv sync")
        return False
    except Exception as e:
        print(f"❌ Test Failed: {e}")
        import traceback
        traceback.print_exc()
        return False
async def test_server_integration():
    """Verify the MCP server module exposes and can initialize the AI tools.

    Returns:
        bool: True on success; False on any failure (traceback printed).
    """
    print("\n🔧 Testing Server Integration")
    print("=" * 50)

    try:
        # Importing these names doubles as a check of the server's public AI
        # surface; get_ai_tools is intentionally imported though never called.
        from dp_mcp.server import AI_AVAILABLE, get_ai_tools, initialize_ai_tools

        print(f"AI Available in server: {AI_AVAILABLE}")

        if AI_AVAILABLE:
            ai_tools = initialize_ai_tools("demo")
            print("✅ AI tools initialized in server context")

            status = ai_tools.get_ai_status()
            print(f"Server AI Status: {status['features_enabled']}")

        print("✅ Server integration working correctly")
        return True

    except Exception as e:
        print(f"❌ Server integration failed: {e}")
        import traceback
        traceback.print_exc()
        return False
async def main():
    """Run both test suites and return a process exit code (0 ok, 1 failed)."""
    print("🚀 DP-MCP AI Integration Test Suite")
    print("==================================")

    # The tests import dp_mcp via the src/ path prepended above, which only
    # resolves when the script is launched from the project root.
    if not Path("src/dp_mcp").exists():
        print("❌ Please run this script from the DP-MCP project root directory")
        # Return the code instead of sys.exit(1) so the __main__ guard is the
        # single place that terminates the process (same exit status).
        return 1

    ai_test_passed = await test_ai_integration()
    server_test_passed = await test_server_integration()

    print("\n📊 Test Results Summary")
    print("=" * 30)
    print(f"AI Integration Tests: {'✅ PASSED' if ai_test_passed else '❌ FAILED'}")
    print(f"Server Integration Tests: {'✅ PASSED' if server_test_passed else '❌ FAILED'}")

    if ai_test_passed and server_test_passed:
        print("\n🎉 All tests passed! AI integration is ready.")
        print("\nNext steps:")
        print("1. Start the server: uv run python src/dp_mcp/server.py --ai-env demo --debug")
        print("2. Test AI tools via MCP client or CLI")
        print("3. Configure real AI models for production use")
        return 0

    print("\n❌ Some tests failed. Please check the output above.")
    return 1
if __name__ == "__main__":
try:
exit_code = asyncio.run(main())
sys.exit(exit_code)
except KeyboardInterrupt:
print("\n⏹️ Tests interrupted by user")
sys.exit(1)
except Exception as e:
print(f"\n💥 Unexpected error: {e}")
sys.exit(1)