#!/usr/bin/env python3
"""
Test Ollama integration with the LLM system
"""
import asyncio
import sys

from llmintegrationsystem import LLMIntegrationSystem


async def test_ollama():
    """Test Ollama integration"""
    print("Testing Ollama Integration...")
    try:
        # Create LLM integration system
        llm = LLMIntegrationSystem()
        print("+ LLM Integration System created")

        # Test getting available models
        models = await llm.get_ollama_models()
        print(f"+ Available Ollama models: {models}")
        if not models:
            print("- No Ollama models found. Run 'ollama pull mistral' to install a model")
            return False

        # Test with the first available model
        test_model = models[0]
        print(f"+ Testing with model: {test_model}")

        # Ensure model is available
        model_available = await llm.ensure_ollama_model(test_model)
        if model_available:
            print(f"+ Model {test_model} is available")
        else:
            print(f"- Model {test_model} is not available")
            return False

        # Test simple chat
        print("+ Testing chat completion...")
        response = await llm.chat(
            message="Say 'Hello from Ollama!' and nothing else",
            model=test_model,
            provider="ollama",
            temperature=0.1,
            max_tokens=50
        )
        print(f"+ Chat response: {response}")
if "ollama" in response.lower() or "hello" in response.lower():
print("+ Ollama chat test successful!")
else:
print("+ Ollama responded, but not as expected")

        await llm.close()
        print("\n+ All Ollama tests completed successfully!")
        return True

    except Exception as e:
        print(f"- Ollama test failed: {e}")
        return False


if __name__ == "__main__":
    success = asyncio.run(test_ollama())
    sys.exit(0 if success else 1)
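

# Usage sketch (assumes an Ollama server is running locally and reachable with
# LLMIntegrationSystem's default configuration; the script filename below is
# hypothetical, adjust it to wherever this file lives):
#
#   ollama pull mistral      # make sure at least one model is installed
#   python test_ollama.py    # run the smoke test; exit code 0 means success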