"""
Custom MCP Client for Product Recommendation System
Uses official MCP SDK with Azure OpenAI for testing all MCP methods
"""
import asyncio
import os
import json
from dotenv import load_dotenv
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
from openai import AzureOpenAI
# Load environment variables from a .env file (if present) into os.environ.
load_dotenv()

# Azure OpenAI configuration.
# NOTE: os.getenv returns None for any variable that is unset — there is no
# validation here, so missing credentials surface later as client errors.
AZURE_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
AZURE_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION")
AZURE_DEPLOYMENT = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")

# MCP Server configuration — the streamable-HTTP endpoint of the local server.
MCP_SERVER_URL = "http://localhost:8000/mcp"
def print_section(title: str):
    """Print a section banner: blank line, 80-char rule, title, rule."""
    rule = "=" * 80
    print(f"\n{rule}")
    print(f" {title}")
    print(rule)
def print_result(query: str, result: str):
    """Print a query and its result in a formatted way.

    Args:
        query: The natural-language query that was sent.
        result: The assistant's final answer text.
    """
    print(f"\nπ Query: {query}")
    # FIX: the original f-string literal was broken across two physical lines
    # (a syntax error — apparently mojibake damage around a multi-byte emoji).
    # Rejoined into one literal with an explicit \n escape so the visible
    # output is unchanged.
    print(f"β\nResult:\n{result}\n")
async def test_all_mcp_methods() -> None:
    """Exercise the MCP server's tools end-to-end via Azure OpenAI.

    Connects to the MCP server over streamable HTTP, advertises the server's
    tools to Azure OpenAI in function-calling format, then runs ten natural
    language test queries, letting the model decide which tool(s) to invoke.
    All output goes to stdout; nothing is returned.

    Raises:
        Any connection, protocol, or OpenAI API error is propagated to the
        caller (main() catches and reports them).
    """
    print_section("PRODUCT RECOMMENDATION MCP CLIENT - TESTING ALL METHODS")
    print(f"\nπ Connecting to MCP Server: {MCP_SERVER_URL}")
    print(f"π€ Using Azure OpenAI: {AZURE_DEPLOYMENT}")

    # Initialize Azure OpenAI client (synchronous client; calls below block
    # the event loop while a completion is in flight).
    azure_client = AzureOpenAI(
        api_key=AZURE_API_KEY,
        api_version=AZURE_API_VERSION,
        azure_endpoint=AZURE_ENDPOINT
    )

    # Connect to MCP server. streams[0]/streams[1] are the read/write streams
    # yielded by the streamable-HTTP transport context manager.
    async with streamablehttp_client(MCP_SERVER_URL) as streams:
        async with ClientSession(streams[0], streams[1]) as session:
            # Perform the MCP handshake before issuing any requests.
            await session.initialize()
            print("β Connected to MCP server successfully!\n")

            # List available tools advertised by the server.
            tools_result = await session.list_tools()
            print(f"π¦ Available tools: {len(tools_result.tools)}")
            for tool in tools_result.tools:
                print(f" - {tool.name}: {tool.description}")

            # Convert MCP tools to OpenAI function-calling format. The MCP
            # inputSchema (JSON Schema) is passed through unchanged as the
            # function "parameters".
            openai_tools = []
            for tool in tools_result.tools:
                openai_tool = {
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": tool.inputSchema
                    }
                }
                openai_tools.append(openai_tool)

            # Test cases: each is a banner title plus a natural-language query;
            # the model chooses which tool(s) to call for each.
            test_cases = [
                {
                    "title": "TEST 1: Search Products (Natural Language)",
                    "query": "I'm looking for a waterproof tent for camping"
                },
                {
                    "title": "TEST 2: Get All Categories",
                    "query": "What product categories are available in the catalog?"
                },
                {
                    "title": "TEST 3: Get All Brands",
                    "query": "What brands do you have available?"
                },
                {
                    "title": "TEST 4: Search Products by Category",
                    "query": "Show me hiking boots from the Hiking Footwear category"
                },
                {
                    "title": "TEST 5: Search Products by Brand",
                    "query": "What products does HikeMate offer?"
                },
                {
                    "title": "TEST 6: Search Products by Category and Brand",
                    "query": "Find tents from OutdoorLiving brand"
                },
                {
                    "title": "TEST 7: Get Product Description",
                    "query": "Tell me everything about the TrailMaster X4 Tent"
                },
                {
                    "title": "TEST 8: Search with Price Filter (Less Than)",
                    "query": "Find hiking clothing items that cost less than 100 USD"
                },
                {
                    "title": "TEST 9: Search with Price Filter (Greater Than)",
                    "query": "Show me premium products over 200 USD"
                },
                {
                    "title": "TEST 10: Complex Query",
                    "query": "I need an affordable backpack for hiking, preferably under 100 USD"
                },
            ]

            # Run all test cases.
            for test_case in test_cases:
                print_section(test_case["title"])
                query = test_case["query"]

                # Create the conversation for Azure OpenAI: a system prompt
                # steering the model toward tool use, plus the user query.
                messages = [
                    {
                        "role": "system",
                        "content": (
                            "You are a helpful product recommendation assistant. "
                            "Use the available tools to search for products, get categories, brands, "
                            "and provide detailed product information. "
                            "Always provide clear, concise answers based on the tool results."
                        )
                    },
                    {
                        "role": "user",
                        "content": query
                    }
                ]

                # Call Azure OpenAI with function calling; "auto" lets the
                # model decide between calling a tool and answering directly.
                response = azure_client.chat.completions.create(
                    model=AZURE_DEPLOYMENT,
                    messages=messages,
                    tools=openai_tools,
                    tool_choice="auto"
                )

                # Process the response.
                response_message = response.choices[0].message

                # Check if tool calls are needed.
                if response_message.tool_calls:
                    # Execute tool calls. The SDK message object is appended
                    # alongside plain dicts — the openai client accepts both.
                    messages.append(response_message)
                    for tool_call in response_message.tool_calls:
                        function_name = tool_call.function.name
                        function_args = json.loads(tool_call.function.arguments)
                        print(f" π§ Calling tool: {function_name}")
                        print(f" π Arguments: {json.dumps(function_args, indent=2)}")

                        # Call the MCP tool with the model-supplied arguments.
                        tool_result = await session.call_tool(function_name, arguments=function_args)

                        # Add tool response to messages.
                        # NOTE(review): json.dumps here is applied to text that
                        # is already a string, so the payload reaches the model
                        # JSON-double-encoded (and an empty result becomes the
                        # literal string "{}") — confirm this is intentional.
                        messages.append({
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "content": json.dumps(tool_result.content[0].text if tool_result.content else {})
                        })

                    # Get final response from Azure OpenAI.
                    # NOTE(review): only one round of tool calls is handled; a
                    # follow-up tool request in this second completion would
                    # surface as (possibly None) plain content.
                    final_response = azure_client.chat.completions.create(
                        model=AZURE_DEPLOYMENT,
                        messages=messages
                    )
                    result = final_response.choices[0].message.content
                else:
                    # Model answered directly without invoking any tool.
                    result = response_message.content

                print_result(query, result)

    print_section("ALL TESTS COMPLETED SUCCESSFULLY! β¨")
    print("\nπ The MCP server and client are working perfectly!")
    # NOTE(review): this says "All 8 MCP methods" but 10 test cases ran above —
    # confirm whether the count refers to distinct server methods or is stale.
    print("π All 8 MCP methods have been tested with various scenarios.")
    print("\n" + "="*80 + "\n")
def main():
    """CLI entry point: print a banner, run the async suite, report failures.

    KeyboardInterrupt and any other exception are caught and reported so the
    script always exits cleanly.
    """
    banner = "π " * 20
    print(f"\n{banner}")
    print(" PRODUCT RECOMMENDATION SYSTEM - MCP CLIENT TEST SUITE")
    print(f"{banner}\n")
    try:
        asyncio.run(test_all_mcp_methods())
    except KeyboardInterrupt:
        print("\n\nβ οΈ Test interrupted by user")
    except Exception as e:
        print(f"\n\nβ Error during testing: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()