#!/usr/bin/env python3
"""
Test the llm_chat tool with Ollama integration
"""
import asyncio
import sys
from server import MCPServer
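

# Helper used by the full tool test below to dispatch a tools/call request
# through the MCP server's registered handler. This is a minimal sketch that
# assumes the low-level mcp SDK stores its handlers in Server.request_handlers
# keyed by request type; that is an SDK internal and may change between mcp
# package versions. The helper name invoke_tool is introduced here for this
# test and is not part of the server's API.
async def invoke_tool(server: MCPServer, name: str, arguments: dict):
    """Dispatch a tools/call request to the server's registered tool handler."""
    from mcp.types import CallToolRequest, CallToolRequestParams

    handler = server.server.request_handlers[CallToolRequest]
    request = CallToolRequest(
        method="tools/call",
        params=CallToolRequestParams(name=name, arguments=arguments),
    )
    # The handler returns a ServerResult; its .root holds the CallToolResult.
    result = await handler(request)
    return result.root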


async def test_llm_chat_tool():
    """Test the llm_chat tool functionality"""
    print("Testing llm_chat tool with Ollama...")
    try:
        # Create server instance
        server = MCPServer()
        print("+ Server created successfully")

        # Test 1: Basic chat with the default model, dispatched through the
        # server's registered tool handler
        print("+ Testing basic chat with default model...")
        result = await invoke_tool(
            server,
            "llm_chat",
            {"message": "What is 2+2? Answer with just the number."},
        )
        response_text = result.content[0].text
        print(f"+ Chat response: '{response_text.strip()}'")

        # Test 2: Chat with a specific Ollama model
        print("+ Testing chat with specific Ollama model...")
        result2 = await invoke_tool(
            server,
            "llm_chat",
            {
                "message": "Hello! Introduce yourself briefly.",
                "model": "mistral:latest",
            },
        )
        response_text2 = result2.content[0].text
        print(f"+ Introduction response: '{response_text2.strip()[:100]}...'")

        # Test 3: Error handling with an invalid model
        print("+ Testing error handling with invalid model...")
        result3 = await invoke_tool(
            server,
            "llm_chat",
            {
                "message": "This should fail",
                "model": "nonexistent-model",
            },
        )
        response_text3 = result3.content[0].text
        print(f"+ Error response: '{response_text3.strip()}'")
print("\n+ LLM chat tool tests completed!")
return True
except Exception as e:
print(f"- LLM chat tool test failed: {e}")
import traceback
traceback.print_exc()
return False


# Alternative, simpler test that bypasses the MCP layer and calls the
# LLM integration directly
async def test_llm_chat_simple():
    """Simpler test that exercises the llm_chat handler logic directly"""
    print("\nTesting llm_chat with direct handler call...")
    try:
        server = MCPServer()

        # Reproduce the llm_chat handler logic: pull the arguments out and
        # call the LLM integration directly
        arguments = {"message": "Say hello from Ollama!", "model": "mistral:latest"}
        message = arguments.get("message")
        model = arguments.get("model", "mistral:latest")
        if not message:
            print("- Missing message")
            return False

        response = await server.llm_integration.chat(message, model)
        print(f"+ Direct chat response: '{response.strip()}'")
        print("+ Direct tool test successful!")
        return True
    except Exception as e:
        print(f"- Direct tool test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    print("Running LLM Chat Tool Tests...")

    # Run the simpler test first
    success1 = asyncio.run(test_llm_chat_simple())
    if success1:
        print("\n" + "=" * 50)
        success2 = asyncio.run(test_llm_chat_tool())
        success = success1 and success2
    else:
        success = False

    sys.exit(0 if success else 1)