# test_model_info_tools.py • 7.22 kB
#!/usr/bin/env python3
"""
Test Model Info Tools for MCP
Tests the new model information tools accessible through MCP
"""
import asyncio
import sys
from pathlib import Path
# Add src to path
sys.path.insert(0, str(Path(__file__).parent))
from src.tools.model_info_tools import ModelInfoTools
async def test_model_info_tools():
    """Exercise every ModelInfoTools method and print a human-readable report.

    Runs five smoke tests against ModelInfoTools: listing available models,
    comparing two models, suggesting a model for a task, reporting current
    model status, and estimating an operation's cost. Each test catches and
    prints its own exceptions so one failure does not stop the rest.

    NOTE(review): the original file was whitespace/encoding-mangled; emoji
    prefixes in output strings were reconstructed from mojibake fragments
    (e.g. "๐ฐ" -> 💰, "๐ฏ" -> 🎯) and a few ambiguous ones were chosen as
    plausible defaults -- confirm against version control if exact glyphs
    matter.
    """
    print("=" * 60)
    print("TESTING MODEL INFO TOOLS")
    print("=" * 60)

    tools = ModelInfoTools()

    # Test 1: Get available models
    print("\n📋 Test 1: Get Available Models")
    print("-" * 40)
    try:
        result = await tools.get_available_models()
        summary = result.get("summary", {})
        print(f"✅ Total models found: {summary['total_models']}")
        print(f"✅ Providers available: {summary['providers_available']}")
        if summary.get('cheapest_model'):
            cheap = summary['cheapest_model']
            print(f"💰 Cheapest model: {cheap['name']} - ${cheap['cost']:.4f}/1k tokens")
        if summary.get('largest_context'):
            large = summary['largest_context']
            print(f"📏 Largest context: {large['name']} - {large['size']:,} tokens")
        # Show first model from each provider
        for provider, info in result.get("providers", {}).items():
            if info['models']:
                model = info['models'][0]
                print(f"\n{provider.upper()}: {model['name']}")
                print(f"   Context: {model['context_window']:,} tokens")
                print(f"   Cost: ${model['cost']['input_per_1k']:.4f} in / ${model['cost']['output_per_1k']:.4f} out")
    except Exception as e:
        print(f"❌ Error: {e}")

    # Test 2: Compare models
    print("\n\n🔍 Test 2: Compare Models")
    print("-" * 40)
    try:
        result = await tools.compare_models("claude-3-opus", "gpt-4-turbo")
        if "comparison" in result:
            comp = result["comparison"]
            print(f"✅ Context ratio: {comp.get('context_ratio', 'N/A')}")
            print(f"✅ Larger context: {comp.get('larger_context', 'N/A')}")
            if "cost_ratio" in comp:
                cost = comp["cost_ratio"]
                print(f"💰 Cheaper model: {comp.get('cheaper', 'N/A')}")
                print(f"   Input cost ratio: {cost.get('input', 0):.2f}x")
                print(f"   Output cost ratio: {cost.get('output', 0):.2f}x")
        if "recommendations" in result:
            print("\n💡 Recommendations:")
            for rec in result["recommendations"]:
                print(f"   • {rec}")
    except Exception as e:
        print(f"❌ Error: {e}")

    # Test 3: Suggest model for task
    print("\n\n🎯 Test 3: Suggest Model for Task")
    print("-" * 40)
    try:
        # Test for code review task
        result = await tools.suggest_model_for_task(
            task_type="code_review",
            context_size=30000,
            budget=0.5
        )
        if result.get("error"):
            print(f"❌ {result['error']}")
        else:
            print(f"✅ Suggested model: {result['suggestion']}")
            print(f"   Provider: {result['provider']}")
            print(f"   Context: {result['context_window']:,} tokens")
            print(f"   Cost: ${result['estimated_cost']:.4f}")
            if result.get("reasoning"):
                print(f"\n📝 {result['reasoning']}")
            if result.get("alternatives"):
                print("\nAlternatives:")
                for alt in result["alternatives"][:2]:
                    print(f"   • {alt['model']} - ${alt['cost']:.4f}")
    except Exception as e:
        print(f"❌ Error: {e}")

    # Test 4: Get current model status
    print("\n\n🤖 Test 4: Current Model Status")
    print("-" * 40)
    try:
        result = await tools.get_current_model_status()
        print(f"✅ Current model: {result['current_model']}")
        print(f"   Provider: {result['provider']}")
        cap = result['capabilities']
        print("\n📊 Capabilities:")
        print(f"   Context: {cap['context_window']:,} tokens")
        print(f"   Max output: {cap['max_output']:,} tokens")
        # Collect optional feature flags into a display list
        features = []
        if cap.get('supports_vision'):
            features.append("Vision")
        if cap.get('supports_functions'):
            features.append("Functions")
        if cap.get('supports_json_mode'):
            features.append("JSON mode")
        if features:
            print(f"   Features: {', '.join(features)}")
        if result.get('cost_estimate'):
            cost = result['cost_estimate']
            print(f"\n💰 Typical cost: ${cost.get('total', 0):.4f} (10k in, 2k out)")
        if result.get('context_usage'):
            usage = result['context_usage']
            print(f"\n📊 Context usage: {usage['utilization']}")
        if result.get('recommendations'):
            print("\n💡 Recommendations:")
            for rec in result['recommendations'][:2]:
                print(f"   • {rec}")
    except Exception as e:
        print(f"❌ Error: {e}")

    # Test 5: Estimate operation cost
    print("\n\n💰 Test 5: Estimate Operation Cost")
    print("-" * 40)
    try:
        result = await tools.estimate_operation_cost(
            operation="code_review",
            input_size=50000,
            output_size=3000,
            model="claude-3-opus"
        )
        print(f"✅ Operation: {result['operation']}")
        print(f"   Model: {result['model']}")
        tokens = result['tokens']
        print("\n📊 Tokens:")
        print(f"   Input: {tokens['input']:,}")
        print(f"   Output: {tokens['output']:,}")
        print(f"   Total: {tokens['total']:,}")
        cost = result['cost']
        print("\n💵 Cost:")
        print(f"   Total: ${cost['total']:.4f}")
        print(f"   Input: ${cost['breakdown']['input']:.4f}")
        print(f"   Output: ${cost['breakdown']['output']:.4f}")
        if result.get('cheaper_alternatives'):
            print("\n💡 Cheaper alternatives:")
            for alt in result['cheaper_alternatives'][:2]:
                print(f"   • {alt['model']}: ${alt['cost']:.4f} (save {alt['savings_percent']:.0f}%)")
        if result.get('recommendation'):
            print(f"\n{result['recommendation']}")
    except Exception as e:
        print(f"❌ Error: {e}")

    print("\n" + "=" * 60)
    print("✅ MODEL INFO TOOLS TEST COMPLETE")
    print("=" * 60)
    print("\nThe model information is now accessible through MCP!")
    print("Connected AI can use these tools to:")
    print("   • Query available models and their capabilities")
    print("   • Compare different models")
    print("   • Get cost estimates for operations")
    print("   • Suggest best model for specific tasks")
    print("   • Report current model status to users")
async def main():
    """Async entry point: run the full ModelInfoTools test suite."""
    await test_model_info_tools()


if __name__ == "__main__":
    # Script entry point — restored indentation lost in the mangled original.
    asyncio.run(main())