
Katamari MCP Server

by ciphernaut
test_lm_studio.py • 6.01 kB
#!/usr/bin/env python3
"""
Test LM Studio connectivity and model discovery.
"""

import asyncio
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from katamari_mcp.acp.llm_client import LLMClient, LLMConfig
from katamari_mcp.utils.config import Config


async def test_lm_studio():
    """Test LM Studio backend."""
    print("🔍 Testing LM Studio Backend...")

    # Load configuration
    config = Config()

    # Check if LM Studio URL is configured
    lm_studio_url = config.get('lm_studio_url')
    if not lm_studio_url:
        print("❌ LM Studio URL not configured. Set KATAMARI_LM_STUDIO_URL environment variable.")
        print("   Example: export KATAMARI_LM_STUDIO_URL=http://localhost:1234")
        return False

    print(f"📡 Using LM Studio URL: {lm_studio_url}")

    # Create LM Studio configuration
    lm_config = LLMConfig(
        backend="lm_studio",
        model_name="auto",  # Will be auto-discovered
        api_base=lm_studio_url,
        temperature=0.7,
        max_tokens=1024
    )

    # Create LLM client with only LM Studio backend
    llm_client = LLMClient()

    try:
        # Initialize with LM Studio
        success = await llm_client.initialize([lm_config])
        if not success:
            print("❌ Failed to initialize LM Studio backend")
            return False

        print("✅ LM Studio backend initialized successfully")

        # Get backend information
        backend_info = await llm_client.get_backend_info()
        print("\n📊 Backend Information:")
        print(f"   Primary backend: {backend_info.get('primary_backend')}")
        print(f"   Fallback backends: {backend_info.get('fallback_backends')}")

        # Show model details
        if 'backend_details' in backend_info:
            for backend_name, details in backend_info['backend_details'].items():
                print(f"\n🤖 {backend_name.upper()} Details:")
                if 'available_models' in details:
                    print(f"   Available models: {len(details['available_models'])}")
                    for model in details['available_models'][:5]:  # Show first 5
                        model_id = model.get('id', 'Unknown')
                        print(f"     - {model_id}")
                    if len(details['available_models']) > 5:
                        print(f"     ... and {len(details['available_models']) - 5} more")
                if 'selected_model' in details:
                    print(f"   Selected model: {details['selected_model']}")

        # Test generation
        print("\n🧪 Testing text generation...")
        test_prompt = "Write a simple Python function that adds two numbers."

        try:
            response = await llm_client.generate(test_prompt, max_tokens=200)
            print("✅ Text generation successful!")
            print("\n📝 Generated response:")
            print("-" * 50)
            print(response.strip())
            print("-" * 50)
        except Exception as e:
            print(f"❌ Text generation failed: {e}")
            return False

        return True

    except Exception as e:
        print(f"❌ Error testing LM Studio: {e}")
        return False
    finally:
        # Cleanup
        try:
            await llm_client.cleanup()
        except:
            pass


async def test_local_fallback():
    """Test local transformers fallback."""
    print("\n🔍 Testing Local Transformers Fallback...")

    # Create local configuration
    local_config = LLMConfig(
        backend="transformers",
        model_name="Qwen/Qwen2-1.5B-Instruct",
        temperature=0.7,
        max_tokens=512
    )

    llm_client = LLMClient()

    try:
        success = await llm_client.initialize([local_config])
        if not success:
            print("❌ Failed to initialize local transformers backend")
            return False

        print("✅ Local transformers backend initialized")

        # Get backend info
        backend_info = await llm_client.get_backend_info()
        if 'backend_details' in backend_info and 'local' in backend_info['backend_details']:
            details = backend_info['backend_details']['local']
            print(f"   Device: {details.get('device', 'unknown')}")
            print(f"   Model loaded: {details.get('model_loaded', False)}")

        # Test generation
        print("\n🧪 Testing local generation...")
        test_prompt = "What is 2 + 2?"

        try:
            response = await llm_client.generate(test_prompt, max_tokens=100)
            print("✅ Local generation successful!")
            print(f"\n📝 Response: {response.strip()}")
            return True
        except Exception as e:
            print(f"❌ Local generation failed: {e}")
            return False

    except Exception as e:
        print(f"❌ Error testing local backend: {e}")
        return False
    finally:
        try:
            await llm_client.cleanup()
        except:
            pass


async def main():
    """Main test function."""
    print("🚀 Katamari MCP - LLM Backend Test")
    print("=" * 50)

    # Test LM Studio first
    lm_studio_success = await test_lm_studio()

    # Test local fallback
    local_success = await test_local_fallback()

    print("\n" + "=" * 50)
    print("📋 SUMMARY:")
    print(f"   LM Studio: {'✅ Working' if lm_studio_success else '❌ Failed'}")
    print(f"   Local: {'✅ Working' if local_success else '❌ Failed'}")

    if lm_studio_success or local_success:
        print("\n🎉 At least one LLM backend is working!")
        return 0
    else:
        print("\n💥 No LLM backends are working!")
        return 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
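Usage note: to run the test itself, set the LM Studio URL first (for example, export KATAMARI_LM_STUDIO_URL=http://localhost:1234, as the script's own hint suggests) and execute the file with Python; the process exits 0 if at least one backend works.

The snippet below is a minimal sketch, not part of the repository, of how the same LLMClient API could be used outside the test: LM Studio configured as the primary backend with local transformers as a fallback in a single client. It assumes that initialize() treats the config list as a priority order, which is what the primary_backend / fallback_backends fields returned by get_backend_info() in the test above suggest; the URL and model names are simply the examples the test uses.

# Sketch only: combine both backends in one client (assumes the config list
# passed to initialize() is treated as priority order, LM Studio first).
import asyncio

from katamari_mcp.acp.llm_client import LLMClient, LLMConfig


async def generate_with_fallback(prompt: str) -> str:
    configs = [
        # Primary: LM Studio, model auto-discovered from the local server.
        LLMConfig(backend="lm_studio", model_name="auto",
                  api_base="http://localhost:1234",
                  temperature=0.7, max_tokens=1024),
        # Fallback: local transformers model, as in test_local_fallback().
        LLMConfig(backend="transformers", model_name="Qwen/Qwen2-1.5B-Instruct",
                  temperature=0.7, max_tokens=512),
    ]
    client = LLMClient()
    try:
        if not await client.initialize(configs):
            raise RuntimeError("No LLM backend could be initialized")
        return await client.generate(prompt, max_tokens=200)
    finally:
        await client.cleanup()


if __name__ == "__main__":
    print(asyncio.run(generate_with_fallback("Write a haiku about local LLMs.")))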
