#!/usr/bin/env python3
"""
Test Ollama with the Mistral model specifically.
"""
import asyncio
import sys
import traceback

from llmintegrationsystem import LLMIntegrationSystem


async def test_ollama_mistral():
"""Test Ollama with Mistral model"""
print("Testing Ollama with Mistral...")
try:
llm = LLMIntegrationSystem()
# Test with mistral specifically
model = "mistral:latest"
print(f"+ Testing with model: {model}")
# Test simple chat
print("+ Testing chat completion...")
response = await llm.chat(
message="What is 2+2? Answer with just the number.",
model=model,
provider="ollama",
temperature=0.1,
max_tokens=10
)
print(f"+ Chat response: '{response.strip()}'")

        # Test a longer, open-ended prompt
        response2 = await llm.chat(
            message="Say hello and introduce yourself as an AI assistant.",
            model=model,
            provider="ollama",
            temperature=0.3,
            max_tokens=100,
        )
        print(f"+ Introduction response: '{response2.strip()}'")

        print("\n+ Mistral test completed successfully!")
        return True
    except Exception as e:
        print(f"- Mistral test failed: {e}")
        traceback.print_exc()
        return False
    finally:
        # Close the client whether or not the requests succeeded
        if llm is not None:
            await llm.close()


if __name__ == "__main__":
    success = asyncio.run(test_ollama_mistral())
    sys.exit(0 if success else 1)