#!/usr/bin/env python3
# verify_ai_config.py
"""
Verify AI configuration is set up correctly.
"""
import os
import subprocess
import requests
import json
def _check_env_file() -> bool:
    """Report on the .env.ai config file; return True iff it exists."""
    print("1️⃣ Configuration File Check:")
    if not os.path.exists('.env.ai'):
        print(" ❌ .env.ai file not found")
        return False
    print(" ✅ .env.ai file exists")
    # Inspect the file for placeholder keys and the expected Ollama URL.
    with open('.env.ai', 'r') as f:
        content = f.read()
    if 'XXXX-REPLACE-WITH-YOUR-ACTUAL' in content:
        print(" ⚠️ Placeholder API keys detected - replace with real keys for cloud models")
    else:
        print(" ✅ API keys appear to be configured")
    if 'OLLAMA_BASE_URL=http://localhost:11434' in content:
        print(" ✅ Ollama configuration found")
    return True


def _check_ollama() -> None:
    """Query the local Ollama API and list installed models with sizes."""
    print("\n2️⃣ Ollama Service Check:")
    try:
        response = requests.get('http://localhost:11434/api/tags', timeout=5)
        if response.status_code == 200:
            models = response.json().get('models', [])
            print(f" ✅ Ollama running with {len(models)} models:")
            for model in models:
                # Ollama reports sizes in bytes; convert to GiB for display.
                size_gb = model['size'] / (1024**3)
                print(f" • {model['name']}: {size_gb:.1f} GB")
        else:
            print(f" ❌ Ollama API error: {response.status_code}")
    except Exception as e:
        print(f" ❌ Ollama not accessible: {e}")
        print(" 💡 Try: ollama serve")


def _check_mcp_server() -> None:
    """Probe the local MCP server endpoint; any HTTP response counts as up."""
    print("\n3️⃣ MCP Server Check:")
    try:
        response = requests.get('http://127.0.0.1:8888/mcp/', timeout=5)
        print(f" ✅ MCP Server running (HTTP {response.status_code})")
    except Exception as e:
        print(f" ❌ MCP Server not accessible: {e}")
        print(" 💡 Try: uv run python src/dp_mcp/server.py --ai-env production --debug")


def _print_model_sizes() -> None:
    """Print a static reference table of common Ollama model sizes."""
    print("\n4️⃣ Model Size Reference:")
    model_sizes = {
        'phi3': '2.2 GB',
        'mistral': '4.1 GB',
        'llama2': '3.8 GB',
        'codellama': '3.8 GB',
        'llama2:13b': '7.3 GB'
    }
    for model, size in model_sizes.items():
        print(f" • {model}: {size}")


def _check_disk_space() -> None:
    """Parse `df -h /` and warn when available space looks low (<= 10 GB)."""
    print("\n5️⃣ Disk Space Check:")
    try:
        result = subprocess.run(['df', '-h', '/'], capture_output=True, text=True)
        lines = result.stdout.strip().split('\n')
        if len(lines) >= 2:
            # df output: header row, then one data row; "Avail" is column 4.
            data = lines[1].split()
            available = data[3] if len(data) > 3 else "Unknown"
            print(f" Available space: {available}")
            # Only attempt a numeric comparison for gigabyte-suffixed values.
            if 'G' in available:
                available_gb = float(available.replace('G', ''))
                if available_gb > 10:
                    print(" ✅ Sufficient space for AI models")
                else:
                    print(" ⚠️ Low disk space - consider cleanup")
    except Exception:
        # Best-effort check: df may be missing or emit unexpected output.
        print(" ❓ Could not check disk space")


def _print_summary() -> None:
    """Print the closing summary and suggested next steps."""
    print("\n🎯 Configuration Summary:")
    print(" • .env.ai: ✅ Ready")
    print(" • Local Models: Ready via Ollama")
    print(" • Cloud Models: Configure API keys to enable")
    print(" • MCP Server: Ready for AI tools")
    print("\n🚀 Next Steps:")
    print(" 1. Replace API key placeholders in .env.ai (optional)")
    print(" 2. Install more Ollama models: ollama pull mistral")
    print(" 3. Test AI tools via MCP protocol")
    print(" 4. Use natural language queries in your applications")


def verify_ai_configuration():
    """Run all AI-configuration checks and print a human-readable report.

    Checks, in order: the .env.ai config file, the local Ollama service,
    the MCP server endpoint, a static model-size reference, and free disk
    space. Purely informational -- prints status lines and returns None.
    """
    print("🔍 DP-MCP AI Configuration Verification")
    print("="*50)
    if not _check_env_file():
        # Without .env.ai nothing else is configured; stop early
        # (preserves the original early-return behavior).
        return
    _check_ollama()
    _check_mcp_server()
    _print_model_sizes()
    _check_disk_space()
    _print_summary()
# Script entry point: run the verification report when executed directly.
if __name__ == "__main__":
    verify_ai_configuration()