#!/usr/bin/env python3
"""
Simple test for LM Studio without local fallback requirement.
"""
import asyncio
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from katamari_mcp.acp.llm_client import LMStudioBackend, LLMConfig


async def test_lm_studio_only():
    """Test LM Studio backend directly."""
    print("🚀 Testing LM Studio Backend Only...")

    # Create LM Studio configuration
    config = LLMConfig(
        backend="lm_studio",
        model_name="auto",
        api_base="http://localhost:1234",
        temperature=0.7,
        max_tokens=1024,
    )
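    # NOTE: "auto" model selection relies on LMStudioBackend picking one of the
    # models currently loaded in LM Studio. LM Studio's OpenAI-compatible
    # server defaults to http://localhost:1234/v1, so the backend presumably
    # appends the /v1 suffix itself (an assumption about its internals).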

    backend = LMStudioBackend()
    try:
        # Initialize
        success = await backend.initialize(config)
        print(f"✅ Initialization: {success}")
        if not success:
            return False

        # Get model info
        model_info = await backend.get_model_info()
        print(f"📊 Available models: {len(model_info.get('available_models', []))}")
        print(f"🎯 Selected model: {model_info.get('selected_model')}")

        # Test generation
        print("\n🧪 Testing code generation...")
        prompt = "Generate a simple Python function that adds two numbers and returns the result."
        response = await backend.generate(prompt, max_tokens=200)
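        # The per-call max_tokens=200 presumably overrides the config-level
        # limit of 1024 for this request (an assumption about generate()'s
        # override semantics).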
print("โ
Generation successful!")
print("\n๐ Generated response:")
print("-" * 60)
print(response.strip())
print("-" * 60)
return True
except Exception as e:
print(f"โ Error: {e}")
import traceback
traceback.print_exc()
return False
finally:
await backend.cleanup()
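

# A minimal pre-flight sketch (not part of the katamari_mcp API): verify that
# LM Studio is reachable by querying its OpenAI-compatible /v1/models endpoint
# directly, assuming the server uses the same base URL as the config above.
def lm_studio_reachable(base_url: str = "http://localhost:1234") -> bool:
    """Return True if LM Studio's /v1/models endpoint lists a loaded model."""
    import json
    import urllib.request

    try:
        with urllib.request.urlopen(f"{base_url}/v1/models", timeout=5) as resp:
            data = json.load(resp)
        # The OpenAI-compatible schema lists loaded models under "data".
        return bool(data.get("data"))
    except Exception:
        return False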


async def main():
    """Main test function."""
    print("🔧 LM Studio Direct Test")
    print("=" * 50)

    success = await test_lm_studio_only()

    print("\n" + "=" * 50)
    if success:
        print("🎉 LM Studio backend is working perfectly!")
        print("   Ready for autonomous code generation!")
    else:
        print("💥 LM Studio test failed!")
        print("   Check that LM Studio is running on localhost:1234")

    return 0 if success else 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)