# client_example.py
import requests
import sys

# Server URL
SERVER_URL = "http://localhost:8000"
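
# Usage (based on the __main__ block below):
#   python client_example.py --index                 # index the default "docs" directory, then prompt for a query
#   python client_example.py --index "your question" # index, then run the query through the agent example
#   python client_example.py                         # interactive: pick an example query or type your own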


def index_documents(directory="docs"):
    """Index documents using the MCP server."""
url = f"{SERVER_URL}/mcp/action"
payload = {
"action_type": "index_documents",
"payload": {
"directory": directory
}
}
print(f"Indexing documents from directory: {directory}")
response = requests.post(url, json=payload)
if response.status_code == 200:
result = response.json()
if result["status"] == "success":
print(f"Success: {result['data']['message']}")
else:
print(f"Error: {result['error']}")
else:
print(f"HTTP Error: {response.status_code} - {response.text}")


def query_documents(query_text, top_k=3):
    """Query documents using the MCP server."""
    url = f"{SERVER_URL}/mcp/action"
    payload = {
        "action_type": "retrieve_documents",
        "payload": {
            "query": query_text,
            "top_k": top_k
        }
    }

    print(f"Querying: '{query_text}'")
    response = requests.post(url, json=payload)

    if response.status_code == 200:
        result = response.json()
        if result["status"] == "success":
            print(f"\nFound {result['data']['total_results']} documents:")
            for i, doc in enumerate(result['data']['documents']):
                print(f"\n--- Result {i+1} (Score: {doc['score']:.4f}) ---")
                print(f"Source: {doc['path']} (Chunk {doc['chunk_index']})")
                print(f"Content: {doc['content'][:200]}...")
            return result['data']['documents']
        else:
            print(f"Error: {result['error']}")
            return []
    else:
        print(f"HTTP Error: {response.status_code} - {response.text}")
        return []


def ai_agent_rag_example(query):
    """Simulated AI agent using RAG via MCP."""
    print("=" * 80)
    print(f"AI Agent processing query: '{query}'")
    print("=" * 80)

    # Step 1: Retrieve relevant documents
    print("\nStep 1: Retrieving relevant documents...")
    documents = query_documents(query)

    if not documents:
        return "I couldn't find any relevant information to answer your question."

    # Step 2: Simulated AI processing with retrieved context
    print("\nStep 2: Generating response based on retrieved documents...")

    # In a real system, this is where you'd pass the documents to an LLM.
    # For this example, we just simulate a response using the content.

    # Extract content from the documents to form the context an LLM would receive
    # (unused by the canned responses below, but shown for completeness).
    context = "\n\n".join([doc['content'] for doc in documents])
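
    # A hedged sketch of the real LLM step (not part of this demo): with the
    # openai package installed and OPENAI_API_KEY set, the retrieved context
    # could be passed to a chat model roughly like this. The model name and
    # prompt wording are illustrative assumptions, not part of the MCP server.
    #
    #   from openai import OpenAI
    #   client = OpenAI()
    #   completion = client.chat.completions.create(
    #       model="gpt-4o-mini",
    #       messages=[
    #           {"role": "system", "content": "Answer using only the provided context."},
    #           {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {query}"},
    #       ],
    #   )
    #   response = completion.choices[0].message.content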
    # Simulated response based on retrieved documents.
    # In a real implementation, this would be the LLM's response.
    response = "Based on the retrieved information, I can tell you that:\n\n"

    if "MCP" in query or "protocol" in query.lower():
        response += "The Machine Conversation Protocol (MCP) is a standardized protocol for AI agents to communicate with tools and APIs. It provides structured interactions through actions with clear types and payloads. MCP enables AI systems to request information, perform actions, and receive standardized responses."
    elif "RAG" in query or "retrieval" in query.lower():
        response += "Retrieval-Augmented Generation (RAG) combines retrieval systems with generative models to produce more accurate and contextual outputs. It works by retrieving relevant documents from a knowledge base, then augmenting the generative model with this information before producing a response."
    else:
        response += "I found some information that might be relevant to your query. The documents discuss concepts related to the Machine Conversation Protocol (MCP) and Retrieval-Augmented Generation (RAG), which are important technologies for AI systems."

    print("\n" + "=" * 80)
    print("Final AI Response:")
    print(response)
    print("=" * 80)

    return response


if __name__ == "__main__":
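    # Note: the MCP server must already be running at SERVER_URL
    # before any of these calls will succeed.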
    # Check if we should index first
    if len(sys.argv) > 1 and sys.argv[1] == "--index":
        index_documents()
        print("\n")

    # Example queries
    queries = [
        "What is the Machine Conversation Protocol?",
        "Explain how RAG systems work",
        "What are the benefits of using vector databases?",
        "How does MCP relate to RAG systems?"
    ]

    # Run an example query or let the user enter one
    if len(sys.argv) > 2:
        ai_agent_rag_example(sys.argv[2])
    else:
        print("Available example queries:")
        for i, q in enumerate(queries):
            print(f"{i+1}. {q}")

        choice = input("\nEnter query number or type your own query: ")
        try:
            idx = int(choice) - 1
            if 0 <= idx < len(queries):
                query = queries[idx]
            else:
                query = choice
        except ValueError:
            query = choice

        ai_agent_rag_example(query)