#!/usr/bin/env python3
"""
Test LM Studio connectivity and model discovery.
"""
import asyncio
import sys
from pathlib import Path
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from katamari_mcp.acp.llm_client import LLMClient, LLMConfig
from katamari_mcp.utils.config import Config
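# Note: the local fallback test loads Qwen/Qwen2-1.5B-Instruct, which is likely
# downloaded from the Hugging Face Hub the first time it runs.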


async def test_lm_studio():
    """Test LM Studio backend."""
    print("🔍 Testing LM Studio Backend...")

    # Load configuration
    config = Config()

    # Check if LM Studio URL is configured
    lm_studio_url = config.get('lm_studio_url')
    if not lm_studio_url:
        print("❌ LM Studio URL not configured. Set KATAMARI_LM_STUDIO_URL environment variable.")
        print(" Example: export KATAMARI_LM_STUDIO_URL=http://localhost:1234")
        return False

    print(f"📡 Using LM Studio URL: {lm_studio_url}")

    # Create LM Studio configuration
    lm_config = LLMConfig(
        backend="lm_studio",
        model_name="auto",  # Will be auto-discovered
        api_base=lm_studio_url,
        temperature=0.7,
        max_tokens=1024
    )

    # Create LLM client with only LM Studio backend
    llm_client = LLMClient()
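
    # initialize() is expected to reach the configured server and auto-select a
    # model (model_name="auto" above); the code below treats a False return as
    # an initialization failure and any exception as an unexpected error.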
    try:
        # Initialize with LM Studio
        success = await llm_client.initialize([lm_config])
        if not success:
            print("❌ Failed to initialize LM Studio backend")
            return False

        print("✅ LM Studio backend initialized successfully")

        # Get backend information
        backend_info = await llm_client.get_backend_info()
        print("\n📊 Backend Information:")
        print(f" Primary backend: {backend_info.get('primary_backend')}")
        print(f" Fallback backends: {backend_info.get('fallback_backends')}")

        # Show model details
        if 'backend_details' in backend_info:
            for backend_name, details in backend_info['backend_details'].items():
                print(f"\n🤖 {backend_name.upper()} Details:")
                if 'available_models' in details:
                    print(f" Available models: {len(details['available_models'])}")
                    for model in details['available_models'][:5]:  # Show first 5
                        model_id = model.get('id', 'Unknown')
                        print(f" - {model_id}")
                    if len(details['available_models']) > 5:
                        print(f" ... and {len(details['available_models']) - 5} more")
                if 'selected_model' in details:
                    print(f" Selected model: {details['selected_model']}")

        # Test generation
        print("\n🧪 Testing text generation...")
        test_prompt = "Write a simple Python function that adds two numbers."

        try:
            response = await llm_client.generate(test_prompt, max_tokens=200)
            print("✅ Text generation successful!")
            print("\n📝 Generated response:")
            print("-" * 50)
            print(response.strip())
            print("-" * 50)
        except Exception as e:
            print(f"❌ Text generation failed: {e}")
            return False

        return True

    except Exception as e:
        print(f"❌ Error testing LM Studio: {e}")
        return False
    finally:
        # Cleanup
        try:
            await llm_client.cleanup()
        except Exception:
            # Ignore cleanup errors so they do not mask the test result
            pass


async def test_local_fallback():
    """Test local transformers fallback."""
    print("\n🔄 Testing Local Transformers Fallback...")

    # Create local configuration
    local_config = LLMConfig(
        backend="transformers",
        model_name="Qwen/Qwen2-1.5B-Instruct",
        temperature=0.7,
        max_tokens=512
    )

    llm_client = LLMClient()

    try:
        success = await llm_client.initialize([local_config])
        if not success:
            print("❌ Failed to initialize local transformers backend")
            return False

        print("✅ Local transformers backend initialized")

        # Get backend info
        backend_info = await llm_client.get_backend_info()
        if 'backend_details' in backend_info and 'local' in backend_info['backend_details']:
            details = backend_info['backend_details']['local']
            print(f" Device: {details.get('device', 'unknown')}")
            print(f" Model loaded: {details.get('model_loaded', False)}")

        # Test generation
        print("\n🧪 Testing local generation...")
        test_prompt = "What is 2 + 2?"

        try:
            response = await llm_client.generate(test_prompt, max_tokens=100)
            print("✅ Local generation successful!")
            print(f"\n📝 Response: {response.strip()}")
            return True
        except Exception as e:
            print(f"❌ Local generation failed: {e}")
            return False

    except Exception as e:
        print(f"❌ Error testing local backend: {e}")
        return False
    finally:
        try:
            await llm_client.cleanup()
        except Exception:
            # Ignore cleanup errors so they do not mask the test result
            pass


async def main():
    """Main test function."""
    print("🚀 Katamari MCP - LLM Backend Test")
    print("=" * 50)

    # Test LM Studio first
    lm_studio_success = await test_lm_studio()

    # Test local fallback
    local_success = await test_local_fallback()

    print("\n" + "=" * 50)
    print("📊 SUMMARY:")
    print(f" LM Studio: {'✅ Working' if lm_studio_success else '❌ Failed'}")
    print(f" Local: {'✅ Working' if local_success else '❌ Failed'}")

    if lm_studio_success or local_success:
        print("\n🎉 At least one LLM backend is working!")
        return 0
    else:
        print("\n💥 No LLM backends are working!")
        return 1
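

# Exit code 0 means at least one backend worked; 1 means neither did, so the
# script can be used as a quick health check from a shell or CI job.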
if __name__ == "__main__":
exit_code = asyncio.run(main())
sys.exit(exit_code)