
Katamari MCP Server

by ciphernaut
discover_lm_studio.py • 5.4 kB
#!/usr/bin/env python3
"""
Discover and test LM Studio instances on the local network.
"""

import asyncio
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from katamari_mcp.acp.llm_client import LMStudioBackend, LLMConfig


async def discover_and_test():
    """Discover LM Studio instances and test them."""
    print("🔍 LM Studio Discovery Tool")
    print("=" * 50)

    # Create LM Studio backend
    backend = LMStudioBackend()

    # Test with auto-discovery
    print("🚀 Starting auto-discovery...")
    config = LLMConfig(
        backend="lm_studio",
        model_name="auto",
        api_base="auto"  # Trigger auto-discovery
    )

    try:
        success = await backend.initialize(config)
        if not success:
            print("❌ Failed to initialize LM Studio backend")
            return False

        print("✅ LM Studio backend initialized successfully!")

        # Get model information
        model_info = await backend.get_model_info()
        print(f"\n📊 Discovery Results:")
        print(f"   API Base: {backend.api_base}")
        print(f"   Available Models: {len(model_info.get('available_models', []))}")
        print(f"   Selected Model: {model_info.get('selected_model', 'None')}")

        # Show available models
        if model_info.get('available_models'):
            print(f"\n🤖 Available Models:")
            for i, model in enumerate(model_info['available_models'][:10]):  # Show first 10
                model_id = model.get('id', 'Unknown')
                print(f"   {i+1:2d}. {model_id}")

            if len(model_info['available_models']) > 10:
                print(f"   ... and {len(model_info['available_models']) - 10} more")

        # Test generation if we have a model
        if model_info.get('selected_model'):
            print(f"\n🧪 Testing text generation...")
            test_prompt = "Write a simple Python function that returns 'Hello World'."

            try:
                response = await backend.generate(test_prompt, max_tokens=150)
                print("✅ Text generation successful!")
                print(f"\n📝 Generated Response:")
                print("-" * 50)
                print(response.strip())
                print("-" * 50)
            except Exception as e:
                print(f"❌ Text generation failed: {e}")
                return False
        else:
            print("\n⚠️ No model selected for testing")

        return True

    except Exception as e:
        print(f"❌ Error during discovery: {e}")
        return False
    finally:
        await backend.cleanup()


async def test_specific_urls():
    """Test specific URLs for LM Studio."""
    print("\n🎯 Testing Specific URLs...")

    # Common LM Studio URLs to test
    test_urls = [
        "http://localhost:1234",
        "http://localhost:1235",
        "http://localhost:8080",
        "http://127.0.0.1:1234",
        "http://0.0.0.0:1234",
    ]

    backend = LMStudioBackend()

    for url in test_urls:
        print(f"   Testing {url}...", end=" ")

        try:
            is_lm_studio = await backend._check_lm_studio_at_url(url)
            if is_lm_studio:
                print("✅ Found LM Studio!")

                # Try to get models
                config = LLMConfig(
                    backend="lm_studio",
                    model_name="auto",
                    api_base=url
                )

                try:
                    await backend.initialize(config)
                    model_info = await backend.get_model_info()
                    model_count = len(model_info.get('available_models', []))
                    selected = model_info.get('selected_model', 'None')
                    print(f"      📊 {model_count} models available, selected: {selected}")
                    await backend.cleanup()
                except Exception as e:
                    print(f"      ❌ Failed to get models: {e}")
            else:
                print("❌ Not LM Studio")
        except Exception as e:
            print(f"❌ Error: {e}")


async def main():
    """Main function."""
    print("🚀 Katamari MCP - LM Studio Discovery")
    print("=" * 60)

    # Auto-discovery test
    discovery_success = await discover_and_test()

    # Specific URL tests
    await test_specific_urls()

    print("\n" + "=" * 60)
    print("📋 SUMMARY:")
    print(f"   Auto-discovery: {'✅ Success' if discovery_success else '❌ Failed'}")

    if discovery_success:
        print("\n🎉 LM Studio is ready for use!")
        print("   You can now run the full ACP pipeline with LLM generation.")
    else:
        print("\n💡 Setup Instructions:")
        print("   1. Install LM Studio from https://lmstudio.ai/")
        print("   2. Start LM Studio")
        print("   3. Load a model (recommended: CodeLlama, DeepSeek Coder, or Qwen)")
        print("   4. Start the server (usually on port 1234)")
        print("   5. Run this script again to verify connection")

    return 0 if discovery_success else 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/ciphernaut/katamari-mcp'
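The same lookup from Python, as a minimal sketch using only the standard library; it assumes the endpoint above returns JSON and makes no assumptions about the response's field names.

import json
import urllib.request

# Same endpoint as the curl example above
URL = "https://glama.ai/api/mcp/v1/servers/ciphernaut/katamari-mcp"

with urllib.request.urlopen(URL) as resp:
    server_info = json.load(resp)  # response is assumed to be JSON

print(json.dumps(server_info, indent=2))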

If you have feedback or need assistance with the MCP directory API, please join our Discord server.