# ollama_client.py
import asyncio
import json
import sys
import http.client
import ollama
import re
from mcp.client.session import ClientSession
from mcp.client.stdio import stdio_client, StdioServerParameters
# Local Ollama model used for the free-form chat fallback (see main()).
OLLAMA_MODEL = "phi"
def _tokenize(text):
    """Split lowercase *text* into words with common trailing punctuation stripped."""
    return [w.strip('.,!?:;') for w in text.split()]


def _detect_weather(text, tokens):
    """Weather intent: city after 'in', else a known city substring, else London."""
    if not any(word in text for word in ['weather', 'temperature', 'rain', 'sunny', 'cloudy']):
        return None
    city = None
    if 'in' in tokens:
        try:
            city = tokens[tokens.index('in') + 1].title()
        except IndexError:
            pass  # 'in' was the last word — fall through to the city list
    if not city:
        common_cities = ['london', 'paris', 'tokyo', 'new york', 'berlin', 'madrid', 'rome', 'sydney']
        for city_name in common_cities:
            if city_name in text:
                city = city_name.title()
                break
    return {"action": "tool_call", "tool": "get_weather_tool", "args": {"city": city or "London"}}


def _detect_math(text, tokens):
    """Math intent: explicit keyword, or an arithmetic symbol alongside a digit.

    Requiring a digit with the symbol avoids false positives on hyphenated
    words like "check-in" (the old check fired on any '-').
    """
    has_keyword = 'calculate' in text or 'math' in text
    has_symbol = any(symbol in text for symbol in ['+', '-', '*', '/', '='])
    if not (has_keyword or (has_symbol and any(ch.isdigit() for ch in text))):
        return None
    expression = text
    for filler in ['calculate', 'what is', 'what\'s', '?']:
        expression = expression.replace(filler, '').strip()
    return {"action": "tool_call", "tool": "calculate_tool", "args": {"expression": expression}}


def _detect_search_note(text, tokens):
    """Note-search intent. Checked BEFORE note creation so 'find note X' searches."""
    phrases = ['search note', 'find note', 'look for note', 'search', 'find']
    if not any(phrase in text for phrase in phrases):
        return None
    query = text
    for phrase in phrases:
        query = query.replace(phrase, '').strip()
    return {
        "action": "tool_call",
        "tool": "search_note_tool",
        "args": {"query": query, "category": ""}
    }


def _detect_note(text, tokens):
    """Note-creation intent. TODO: NLP parsing of title/content/category."""
    if not any(word in text for word in ['note', 'create note', 'add note', 'new note']):
        return None
    return {
        "action": "tool_call",
        "tool": "note_tool",
        "args": {"title": "New Note", "content": text, "category": "general"}
    }


def _detect_time(text, tokens):
    """Time intent: whole-word match so 'know' does not trigger on 'now'."""
    if not any(w in ('time', 'date', 'clock', 'now') for w in tokens):
        return None
    return {"action": "tool_call", "tool": "time_tool", "args": {"timezone": "local"}}


def _detect_greeting(text, tokens):
    """Greeting intent: whole-word match (substring 'hi' used to fire on 'this').

    The name is the first alphabetic word after the greeting, skipping 'to';
    defaults to "Friend" (a bare "hi" used to greet "Hi").
    """
    greetings = ('hello', 'hi', 'greet')
    if not any(w in greetings for w in tokens):
        return None
    name = "Friend"
    for i, w in enumerate(tokens):
        if w in greetings:
            rest = [t for t in tokens[i + 1:] if t != 'to']
            if rest and rest[0].isalpha():
                name = rest[0].title()
            break
    return {"action": "tool_call", "tool": "say_hello", "args": {"name": name}}


def _detect_command(text, tokens):
    """CLI command intent: prefer a quoted command, else everything after the phrase."""
    phrases = ['run command', 'run this', 'command line:']
    if not any(phrase in text for phrase in phrases):
        return None
    match = re.search(r"(?:run command|run this|command line:)\s*['\"]([^'\"]+)['\"]", text)
    if match:
        command = match.group(1).strip()
    else:
        command = None
        for phrase in phrases:
            if phrase in text:
                command = text.split(phrase, 1)[1].strip()
                break
    if not command:
        return None  # phrase with nothing after it — let the LLM handle it
    return {
        "action": "tool_call",
        "tool": "command_line_tool",
        "args": {"command": command, "safe_mode": True}
    }


def detect_tool_intent(user_input):
    """Map a raw user message to a forced tool call, or None for free chat.

    Runs keyword detectors in priority order (weather, math, note search,
    note creation, time, greeting, CLI command) and returns the first
    decision dict: {"action": "tool_call", "tool": <name>, "args": {...}}.
    Returns None when nothing matches so the caller can fall back to the LLM.
    """
    text = user_input.lower()
    tokens = _tokenize(text)
    detectors = (
        _detect_weather,
        _detect_math,
        _detect_search_note,  # before _detect_note: 'find note X' must search
        _detect_note,
        _detect_time,
        _detect_greeting,
        _detect_command,
    )
    for detector in detectors:
        decision = detector(text, tokens)
        if decision is not None:
            return decision
    return None
def check_ollama_running():
    """Return True if an Ollama server answers at http://localhost:11434.

    Probes GET /api/tags with a 2-second timeout and prints a status line
    either way so the CLI user knows what happened. Any network failure
    (connection refused, timeout) is treated as "not running".
    """
    conn = None
    try:
        conn = http.client.HTTPConnection("localhost", 11434, timeout=2)
        conn.request("GET", "/api/tags")
        resp = conn.getresponse()
        if resp.status == 200:
            print("✅ Ollama server detected at http://localhost:11434")
            return True
    except Exception:
        pass  # best-effort probe: any failure falls through to the error message
    finally:
        if conn is not None:
            conn.close()  # fix: original leaked the socket on every call
    print("❌ Could not connect to Ollama at http://localhost:11434")
    print("💡 Start it with: ollama serve")
    return False
async def main():
    """Interactive chat loop: route each message to an MCP tool or to Ollama.

    Exits early if the Ollama server is unreachable. Otherwise spawns the
    modular tool server as a child process speaking MCP over stdio and loops
    on user input until 'quit'/'exit', Ctrl-C, or EOF.
    """
    if not check_ollama_running():
        sys.exit(1)
    # Child process that serves the tools over stdio.
    server_commands = StdioServerParameters(command="python", args=["simple_modular_server.py"])
    try:
        async with stdio_client(server_commands) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                print("✅ Connected to modular MCP server + Ollama")
                print("🎯 Ready to chat!")
                print("💡 Try: 'weather in London', 'calculate 15*42', 'what time is it', 'say hello to Alice'")
                while True:
                    try:
                        try:
                            user_input = input("\n💬 You: ").strip()
                        except EOFError:
                            # stdin closed (e.g. piped input exhausted) — exit
                            # cleanly instead of looping forever on the error.
                            print("\n👋 Goodbye!")
                            break
                        if user_input.lower() in {"quit", "exit"}:
                            break
                        elif not user_input:
                            continue
                        # First, try forced keyword-based tool detection.
                        decision = detect_tool_intent(user_input)
                        if decision:
                            print(f"🔍 Detected tool intent: {decision}")
                        else:
                            # Fallback to the LLM for free-form chat.
                            try:
                                llm_response = ollama.chat(
                                    model=OLLAMA_MODEL,
                                    messages=[
                                        {"role": "system", "content": "Respond naturally to the user's message."},
                                        {"role": "user", "content": user_input}
                                    ]
                                )
                                decision = {"action": "chat", "reply": llm_response["message"]["content"]}
                            except Exception:
                                # fix: was a bare `except:` that also swallowed
                                # KeyboardInterrupt/SystemExit during the LLM call
                                decision = {"action": "chat", "reply": "I'm not sure how to help with that."}
                        # Execute the decision.
                        if decision.get("action") == "tool_call":
                            tool_name = decision.get("tool")
                            args = decision.get("args", {})
                            print(f"🛠 Calling `{tool_name}` with {args}...")
                            try:
                                result = await session.call_tool(tool_name, args)
                                print("📋 Result:")
                                # MCP results usually carry a list of content items;
                                # pretty-print items whose text parses as JSON.
                                if hasattr(result, 'content') and result.content:
                                    if isinstance(result.content, list):
                                        for item in result.content:
                                            if hasattr(item, 'text'):
                                                try:
                                                    parsed = json.loads(item.text)
                                                    print(json.dumps(parsed, indent=2))
                                                except (json.JSONDecodeError, TypeError):
                                                    # fix: narrowed from bare `except:`;
                                                    # non-JSON text is printed verbatim
                                                    print(item.text)
                                            else:
                                                print(item)
                                    else:
                                        print(result.content)
                                else:
                                    print(result)
                            except Exception as e:
                                error_msg = f"❌ Tool error: {e}"
                                print(error_msg)
                        elif decision.get("action") == "chat":
                            reply = decision.get("reply")
                            print("💬", reply)
                    except KeyboardInterrupt:
                        print("\n👋 Goodbye!")
                        break
                    except Exception as e:
                        print(f"❌ Error: {e}")
    except Exception as e:
        print(f"❌ Failed to connect to MCP server: {e}")
        print("💡 Make sure simple_modular_server.py and tools/ directory exist")
if __name__ == "__main__":
    # Entry point: drive the async chat client to completion.
    asyncio.run(main())