#!/usr/bin/env python3
"""
Discover and test LM Studio instances on the local network.
"""
import asyncio
import sys
from pathlib import Path
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from katamari_mcp.acp.llm_client import LMStudioBackend, LLMConfig
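
# Interface exercised below (as used by this script, from
# katamari_mcp.acp.llm_client):
#   LMStudioBackend - async initialize(config) -> bool, get_model_info(),
#                     generate(prompt, max_tokens=...), cleanup()
#   LLMConfig       - backend, model_name, api_base ("auto" = auto-discovery)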


async def discover_and_test():
    """Discover LM Studio instances and test them."""
    print("🔍 LM Studio Discovery Tool")
    print("=" * 50)

    # Create LM Studio backend
    backend = LMStudioBackend()

    # Test with auto-discovery
    print("🚀 Starting auto-discovery...")
    config = LLMConfig(
        backend="lm_studio",
        model_name="auto",
        api_base="auto"  # Trigger auto-discovery
    )

    try:
        success = await backend.initialize(config)
        if not success:
            print("❌ Failed to initialize LM Studio backend")
            return False

        print("✅ LM Studio backend initialized successfully!")

        # Get model information
        model_info = await backend.get_model_info()
        print("\n📊 Discovery Results:")
        print(f" API Base: {backend.api_base}")
        print(f" Available Models: {len(model_info.get('available_models', []))}")
        print(f" Selected Model: {model_info.get('selected_model', 'None')}")

        # Show available models
        if model_info.get('available_models'):
            print("\n🤖 Available Models:")
            for i, model in enumerate(model_info['available_models'][:10]):  # Show first 10
                model_id = model.get('id', 'Unknown')
                print(f" {i+1:2d}. {model_id}")
            if len(model_info['available_models']) > 10:
                print(f" ... and {len(model_info['available_models']) - 10} more")

        # Test generation if we have a model
        if model_info.get('selected_model'):
            print("\n🧪 Testing text generation...")
            test_prompt = "Write a simple Python function that returns 'Hello World'."
            try:
                response = await backend.generate(test_prompt, max_tokens=150)
                print("✅ Text generation successful!")
                print("\n📝 Generated Response:")
                print("-" * 50)
                print(response.strip())
                print("-" * 50)
            except Exception as e:
                print(f"❌ Text generation failed: {e}")
                return False
        else:
            print("\n⚠️ No model selected for testing")

        return True
    except Exception as e:
        print(f"❌ Error during discovery: {e}")
        return False
    finally:
        await backend.cleanup()
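

# NOTE (assumption): `_check_lm_studio_at_url` is a private helper on
# LMStudioBackend; presumably it probes the OpenAI-compatible /v1/models
# endpoint to decide whether an LM Studio server is answering at a URL.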
async def test_specific_urls():
    """Test specific URLs for LM Studio."""
    print("\n🎯 Testing Specific URLs...")

    # Common LM Studio URLs to test
    test_urls = [
        "http://localhost:1234",
        "http://localhost:1235",
        "http://localhost:8080",
        "http://127.0.0.1:1234",
        "http://0.0.0.0:1234",
    ]

    backend = LMStudioBackend()
    for url in test_urls:
        print(f" Testing {url}...", end=" ")
        try:
            is_lm_studio = await backend._check_lm_studio_at_url(url)
            if is_lm_studio:
                print("✅ Found LM Studio!")

                # Try to get models
                config = LLMConfig(
                    backend="lm_studio",
                    model_name="auto",
                    api_base=url
                )
                try:
                    await backend.initialize(config)
                    model_info = await backend.get_model_info()
                    model_count = len(model_info.get('available_models', []))
                    selected = model_info.get('selected_model', 'None')
                    print(f" 📊 {model_count} models available, selected: {selected}")
                except Exception as e:
                    print(f" ❌ Failed to get models: {e}")
                finally:
                    # Always release the backend before probing the next URL
                    await backend.cleanup()
            else:
                print("❌ Not LM Studio")
        except Exception as e:
            print(f"❌ Error: {e}")


async def main():
    """Main function."""
    print("🚀 Katamari MCP - LM Studio Discovery")
    print("=" * 60)

    # Auto-discovery test
    discovery_success = await discover_and_test()

    # Specific URL tests
    await test_specific_urls()

    print("\n" + "=" * 60)
    print("📋 SUMMARY:")
    print(f" Auto-discovery: {'✅ Success' if discovery_success else '❌ Failed'}")

    if discovery_success:
        print("\n🎉 LM Studio is ready for use!")
        print(" You can now run the full ACP pipeline with LLM generation.")
    else:
        print("\n💡 Setup Instructions:")
        print(" 1. Install LM Studio from https://lmstudio.ai/")
        print(" 2. Start LM Studio")
        print(" 3. Load a model (recommended: CodeLlama, DeepSeek Coder, or Qwen)")
        print(" 4. Start the server (usually on port 1234)")
        print(" 5. Run this script again to verify connection")

    return 0 if discovery_success else 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
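
# Usage: run this file directly, e.g. `python discover_lm_studio.py`.
# The process exits 0 when auto-discovery succeeds and 1 otherwise, so it
# can be used to gate setup or CI scripts.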