#!/usr/bin/env python3
"""
Complete test of MCP server with Ollama integration
"""
import asyncio
import sys
from server import MCPServer
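# NOTE: These tests assume a local Ollama instance is reachable (typically at
# http://localhost:11434, depending on how server.py is configured) with the
# "mistral:latest" model already pulled. They also rely on MCPServer exposing
# `llm_integration` (async chat(), get_ollama_models(), get_available_models(),
# close()) and `chat_memory` (async store_memory(), get_memory()), as used below.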


async def test_complete_server():
    """Test complete server functionality with Ollama."""
    print("Testing complete MCP server with Ollama...")
    try:
        # Create server instance
        server = MCPServer()
        print("+ Server created successfully")

        # Test 1: Direct LLM integration
        print("\n+ Testing direct LLM integration...")
        response = await server.llm_integration.chat(
            "What is the capital of France? Answer with just the city name.",
            "mistral:latest"
        )
        print(f"+ LLM Response: '{response.strip()}'")

        # Test 2: Memory system with LLM
        print("\n+ Testing memory + LLM integration...")
        # Store a memory
        memory_id = await server.chat_memory.store_memory(
            "test_conv_llm",
            "User prefers short answers",
            {"type": "preference"}
        )
        print(f"+ Stored memory with ID: {memory_id}")

        # Get memory
        memories = await server.chat_memory.get_memory("test_conv_llm")
        print(f"+ Retrieved {len(memories)} memories")

        # Test 3: Available models
        print("\n+ Testing model availability...")
        ollama_models = await server.llm_integration.get_ollama_models()
        print(f"+ Available Ollama models: {ollama_models}")

        # Test 4: Provider selection
        print("\n+ Testing provider selection...")
        all_models = server.llm_integration.get_available_models()
        print(f"+ All available providers and models: {all_models}")

        # Test 5: Different temperature settings
        print("\n+ Testing temperature variations...")
        creative_response = await server.llm_integration.chat(
            "Write a very short creative greeting",
            "mistral:latest",
            temperature=0.9,
            max_tokens=50
        )
        print(f"+ Creative response: '{creative_response.strip()}'")

        factual_response = await server.llm_integration.chat(
            "What is 10 * 5?",
            "mistral:latest",
            temperature=0.1,
            max_tokens=10
        )
        print(f"+ Factual response: '{factual_response.strip()}'")

        # Clean up
        await server.llm_integration.close()
        print("\n+ All server tests completed successfully!")
        return True
    except Exception as e:
        print(f"- Server test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_tool_simulation():
    """Simulate tool calls like an MCP client would."""
    print("\n" + "=" * 60)
    print("Simulating MCP Tool Calls...")
    try:
        server = MCPServer()

        # Simulate echo tool
        print("\n+ Simulating echo tool...")
        echo_args = {"text": "Hello MCP with Ollama!"}
        if echo_args.get("text"):
            echo_result = echo_args["text"]
            print(f"+ Echo result: {echo_result}")

        # Simulate llm_chat tool
        print("\n+ Simulating llm_chat tool...")
        chat_args = {
            "message": "Explain what MCP (Model Context Protocol) is in one sentence.",
            "model": "mistral:latest"
        }
        if chat_args.get("message"):
            message = chat_args["message"]
            model = chat_args.get("model", "mistral:latest")
            chat_response = await server.llm_integration.chat(message, model)
            print(f"+ Chat result: {chat_response.strip()}")

        # Simulate memory tools
        print("\n+ Simulating memory tools...")
        store_args = {
            "conversation_id": "sim_conv_123",
            "content": "User asked about MCP",
            "metadata": {"topic": "protocol"}
        }
        memory_id = await server.chat_memory.store_memory(
            store_args["conversation_id"],
            store_args["content"],
            store_args.get("metadata", {})
        )
        print(f"+ Stored memory: {memory_id}")

        get_args = {"conversation_id": "sim_conv_123"}
        memories = await server.chat_memory.get_memory(get_args["conversation_id"])
        print(f"+ Retrieved memories: {len(memories)} items")

        await server.llm_integration.close()
        print("\n+ Tool simulation completed successfully!")
        return True
    except Exception as e:
        print(f"- Tool simulation failed: {e}")
        import traceback
        traceback.print_exc()
        return False
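

# For reference: when these tools are driven by a real MCP client instead of
# the direct simulation above, each call arrives as a JSON-RPC 2.0 request.
# The shape below is only a sketch of the MCP "tools/call" method; exact
# fields depend on the client and protocol version in use:
#
#   {"jsonrpc": "2.0", "id": 1, "method": "tools/call",
#    "params": {"name": "llm_chat",
#               "arguments": {"message": "...", "model": "mistral:latest"}}}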


if __name__ == "__main__":
    print("Complete MCP Server Test Suite")
    print("=" * 60)

    # Run both tests
    success1 = asyncio.run(test_complete_server())
    success2 = asyncio.run(test_tool_simulation())

    if success1 and success2:
        print("\n" + "=" * 60)
        print("ALL TESTS PASSED! Your MCP server with Ollama is working correctly.")
        print("\nTo use the server:")
        print("1. Run: python server.py")
        print("2. Connect with an MCP client")
        print("3. Use tools: echo, llm_chat, store_memory, get_memory")
        print("4. The default model is 'mistral:latest' via Ollama")
    else:
        print("\nSome tests failed. Check the output above.")

    sys.exit(0 if success1 and success2 else 1)