
College Football MCP

by gedin-eth
main.py (32.5 kB)
""" Agent Service - FastAPI application for chat interface This service orchestrates LLM calls and MCP server function calls to provide a conversational interface for college football data. """ from __future__ import annotations from fastapi import FastAPI, HTTPException, Header from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel from typing import Optional, List, Dict, Any import os import uuid import logging import json from openai import AsyncOpenAI from mcp_client import MCPClient from research_client import ResearchClient # Configure logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # Load environment variables APP_TOKEN = os.environ.get("APP_TOKEN", "") if not APP_TOKEN: logger.warning("APP_TOKEN env var is not set - authentication will fail") # MCP Server URL (defaults to localhost, can be overridden) MCP_SERVER_URL = os.environ.get("MCP_SERVER_URL", "http://localhost:8000") # OpenAI API Key OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "") if not OPENAI_API_KEY: logger.warning("OPENAI_API_KEY env var is not set - LLM features will not work") # Initialize OpenAI client (async for Responses API) openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None # Initialize MCP client mcp_client = MCPClient(MCP_SERVER_URL) # Initialize Research MCP client research_client = ResearchClient() app = FastAPI(title="CFB Agent Service", version="0.1.0") # CORS middleware - allow all origins for now (tighten in production) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) class ChatRequest(BaseModel): """Request model for chat endpoint""" message: str thread_id: Optional[str] = None class ChatResponse(BaseModel): """Response model for chat endpoint""" reply: str thread_id: str # Define available tools for the LLM (Chat Completions format - GPT-5 works with this) MCP_TOOLS = [ { "type": "function", "function": { "name": "get_game_odds_and_score", "description": "Get live game scores, betting odds, and game details for a specific matchup. Use this as the PRIMARY tool for checking TODAY's games, live scores, game stats, or specific matchups. Returns detailed game information including scores, status, betting lines, and game details. Use this FIRST for any game-related queries.", "parameters": { "type": "object", "properties": { "team1": { "type": "string", "description": "First team name (required)" }, "team2": { "type": "string", "description": "Second team name (optional, for exact match)" } }, "required": ["team1"] } } }, { "type": "function", "function": { "name": "get_recent_player_stats", "description": "Get a player's statistics from their last 5 games. Use when user asks about a specific player's recent performance, stats, or how they've been playing.", "parameters": { "type": "object", "properties": { "player_name": { "type": "string", "description": "Player name (required)" }, "team": { "type": "string", "description": "Team name (optional, for disambiguation)" } }, "required": ["player_name"] } } }, { "type": "function", "function": { "name": "get_team_recent_results", "description": "Get a team's last 5 COMPLETED game results including opponents, scores, and win/loss. Use when user asks about past games, recent performance trends, or historical results. 
DO NOT use this for checking if a team won TODAY - use get_game_odds_and_score instead.", "parameters": { "type": "object", "properties": { "team": { "type": "string", "description": "Team name (required)" } }, "required": ["team"] } } }, { "type": "function", "function": { "name": "get_team_info", "description": "Get a team's current season overview including record, conference record, and rankings. Use when user asks about a team's season, record, ranking, or overall status.", "parameters": { "type": "object", "properties": { "team": { "type": "string", "description": "Team name (required)" } }, "required": ["team"] } } }, { "type": "function", "function": { "name": "get_next_game_odds", "description": "Get a team's next scheduled game and betting odds. Use when user asks about upcoming games, next opponent, or future betting lines for a team.", "parameters": { "type": "object", "properties": { "team": { "type": "string", "description": "Team name (required)" } }, "required": ["team"] } } }, { "type": "function", "function": { "name": "research_latest_score", "description": "Use AI-powered web research to find detailed game stats, box scores, or latest scores. This tool uses research-mcp to search the web for real-time information. USE THIS when users ask for: (1) detailed game statistics (passing yards, rushing yards, player stats, box scores), (2) latest/live scores when get_game_odds_and_score doesn't have them, (3) any stats beyond basic scores/odds, (4) users mention 'research' or 'research-mcp'. For basic scores/odds, try get_game_odds_and_score first, but for detailed stats ALWAYS use this tool. When users ask to use 'research' or 'research-mcp', this is the tool to use.", "parameters": { "type": "object", "properties": { "team1": { "type": "string", "description": "First team name (required)" }, "team2": { "type": "string", "description": "Second team name (optional, for specific matchups)" }, "context": { "type": "string", "description": "Additional context about what to research (e.g., 'latest score today', 'most recent game result')" } }, "required": ["team1"] } } } ] def extract_text_from_response(response) -> str: """ Extract text from a Responses API response object. Handles various response structures including ResponseReasoningItem lists. 
""" if response is None: return "" # Try output_text first (primary field) if hasattr(response, 'output_text'): output_text = response.output_text if output_text and isinstance(output_text, str): return output_text # Check output field if hasattr(response, 'output'): output = response.output if output: if isinstance(output, str): return output elif isinstance(output, list): # Iterate through list items (could be ResponseReasoningItem objects) for item in output: # Debug: log item structure logger.debug(f"Processing output item: {type(item)}, attributes: {dir(item) if hasattr(item, '__dict__') else 'N/A'}") # Try to get model_dump if it's a Pydantic model if hasattr(item, 'model_dump'): try: item_dict = item.model_dump() logger.debug(f"Item dict: {item_dict}") # Look for text, content, or message fields for key in ['text', 'content', 'message', 'output_text', 'output']: if key in item_dict: value = item_dict[key] if isinstance(value, str) and value: return value elif isinstance(value, list): for v in value: if isinstance(v, str) and v: return v elif isinstance(v, dict): for sub_key in ['text', 'content']: if sub_key in v and isinstance(v[sub_key], str) and v[sub_key]: return v[sub_key] except Exception as e: logger.debug(f"Error in model_dump: {e}") # Check for text attribute if hasattr(item, 'text') and item.text: text = item.text if isinstance(text, str): return text # Check for content attribute if hasattr(item, 'content'): content = item.content if isinstance(content, str): return content elif isinstance(content, list): for c in content: if hasattr(c, 'text') and c.text: return str(c.text) elif isinstance(c, str): return c elif hasattr(c, 'model_dump'): try: c_dict = c.model_dump() if 'text' in c_dict and isinstance(c_dict['text'], str): return c_dict['text'] except: pass # Check for message attribute if hasattr(item, 'message'): msg = item.message if hasattr(msg, 'content') and msg.content: return str(msg.content) elif hasattr(msg, 'model_dump'): try: msg_dict = msg.model_dump() if 'content' in msg_dict and isinstance(msg_dict['content'], str): return msg_dict['content'] except: pass # Check if item is a string if isinstance(item, str): return item # Try to access item as dict if isinstance(item, dict): for key in ['text', 'content', 'message', 'output_text']: if key in item: value = item[key] if isinstance(value, str) and value: return value # Check choices if hasattr(response, 'choices') and response.choices: for choice in response.choices: if hasattr(choice, 'text') and choice.text: return str(choice.text) elif hasattr(choice, 'message'): msg = choice.message if hasattr(msg, 'content') and msg.content: return str(msg.content) # Last resort: try to serialize the whole response logger.warning(f"Could not extract text from response: {type(response)}") try: if hasattr(response, 'model_dump'): response_dict = response.model_dump() logger.warning(f"Response dump keys: {list(response_dict.keys()) if isinstance(response_dict, dict) else 'N/A'}") # Try to find any string value in the dump def find_string_in_dict(d, max_depth=3): if max_depth <= 0: return None if isinstance(d, str) and len(d) > 10: # Only return substantial strings return d elif isinstance(d, dict): for v in d.values(): result = find_string_in_dict(v, max_depth - 1) if result: return result elif isinstance(d, list): for v in d: result = find_string_in_dict(v, max_depth - 1) if result: return result return None found_text = find_string_in_dict(response_dict) if found_text: return found_text except Exception as e: 
logger.debug(f"Error in model_dump fallback: {e}") return "" async def research_latest_score( team1: str, team2: Optional[str] = None, context: str = "" ) -> Optional[Dict[str, Any]]: """ Use research-mcp to find the latest score for a game. Args: team1: First team name team2: Optional second team name context: Additional context for the research query Returns: Research results with latest score information """ try: # Build research topic if team2: topic = f"Latest score and result for {team1} vs {team2} college football game today" else: topic = f"Latest score and result for {team1} college football game today" if context: topic += f". {context}" # Add date context for better search results from datetime import datetime today = datetime.now().strftime("%B %d, %Y") topic += f" ({today})" logger.info(f"Researching latest score: {topic}") # Call research-mcp with focused parameters for speed research_result = await research_client.run_research( topic=topic, iterations=1, # Single iteration for speed query_count=5, # 5 queries should be enough for score lookup page_size=5 # Smaller page size for faster response ) if not research_result: return {"error": "Research query failed"} # Extract synthesis if available synthesis = research_result.get("final_synthesis", {}) synthesis_text = "" if isinstance(synthesis, dict): # Try multiple possible keys for synthesis content synthesis_text = ( synthesis.get("text", "") or synthesis.get("content", "") or synthesis.get("raw_response", "") or str(synthesis.get("sections", "")) ) elif isinstance(synthesis, str): synthesis_text = synthesis # Also extract search results for additional context search_results = research_result.get("search_results_page", []) # Format response for LLM - make it VERY clear research was successful # Extract raw_response if available for better content raw_response = "" if isinstance(synthesis, dict) and "raw_response" in synthesis: raw_response = synthesis.get("raw_response", "") # Prefer raw_response if available, otherwise use synthesis_text final_content = raw_response if raw_response else synthesis_text result = { "research_performed": "YES - Research was successfully performed using research-mcp", "status": "success", "team1": team1, "team2": team2, "source": "AI-powered web research (research-mcp backend)" } # Always include findings - even if empty, make it clear research happened if final_content and len(final_content.strip()) > 0: result["research_results"] = final_content[:1500] # Include more content result["message"] = f"Research completed successfully. Here are the findings: {final_content[:1000]}" else: result["research_results"] = "Research query was executed but no specific game information was found in the search results. This may mean the game is not happening today, has already concluded, or information is not yet available online." result["message"] = "Research was performed successfully, but did not find evidence of this game happening today." if search_results: result["search_results_found"] = len(search_results) result["note"] = f"Research completed. Analyzed {len(search_results)} web sources." 
else: result["search_results_found"] = 0 if research_result.get("research_id"): result["research_id"] = research_result.get("research_id") return result except Exception as e: logger.error(f"Error in research_latest_score: {e}") import traceback logger.error(traceback.format_exc()) return {"error": f"Research failed: {str(e)}"} async def call_mcp_function(function_name: str, arguments: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ Call an MCP server function. Args: function_name: Name of the MCP function to call arguments: Function arguments Returns: Function result or None if error """ try: if function_name == "get_game_odds_and_score": return await mcp_client.get_game_odds_and_score( arguments.get("team1"), arguments.get("team2") ) elif function_name == "get_recent_player_stats": return await mcp_client.get_recent_player_stats( arguments.get("player_name"), arguments.get("team") ) elif function_name == "get_team_recent_results": return await mcp_client.get_team_recent_results( arguments.get("team") ) elif function_name == "get_team_info": return await mcp_client.get_team_info( arguments.get("team") ) elif function_name == "get_next_game_odds": return await mcp_client.get_next_game_odds( arguments.get("team") ) elif function_name == "research_latest_score": return await research_latest_score( arguments.get("team1"), arguments.get("team2"), arguments.get("context", "") ) else: logger.error(f"Unknown function: {function_name}") return None except Exception as e: logger.error(f"Error calling MCP function {function_name}: {e}") return None async def run_agent(thread_id: str, message: str) -> str: """ Run the agent with LLM tool-calling loop using Responses API. Implements a tool-calling loop where the LLM can call MCP functions to retrieve college football data and provide informed responses. Args: thread_id: Conversation thread ID message: User message Returns: Agent response """ if not openai_client: return "Error: OpenAI API key not configured. Please set OPENAI_API_KEY environment variable." # Instructions to guide the LLM instructions = """You are a helpful assistant for college football information. You have access to tools that can retrieve: - Live game scores and betting odds (use for TODAY's games and current scores) - Player statistics from recent games - Team recent results and performance (past completed games) - Team season information and rankings - Next game schedules and odds - AI-powered web research for latest scores and detailed stats (tool name: research_latest_score - use this when users mention 'research' or 'research-mcp') IMPORTANT RULES: 1. When a user asks about a team winning/losing "today" or "right now", you MUST use get_game_odds_and_score to check for live games. 2. If get_game_odds_and_score returns a game with start_time matching today's date, that means the team HAS a game today, even if status is "scheduled" or "in_progress". 3. If status is "scheduled" but the game time has passed, the game is likely in progress - say "Miami has a game today against [opponent]" and check if scores are available. 4. If status is "in_progress" or "completed", provide the current/final score. 5. Only say "no game today" if get_game_odds_and_score returns no game or a game scheduled for a future date. 6. Use get_game_odds_and_score as the PRIMARY method for scores and odds. However, it does NOT provide detailed game statistics (passing yards, rushing yards, player stats, box scores). 7. 
When users ask for detailed game STATS (passing yards, rushing yards, player performance, box scores), use research_latest_score to search for live box score data. 8. Also use research_latest_score as a FALLBACK when: - get_game_odds_and_score fails to return a score (returns 404 or no score data) - The primary API shows a game is in progress but you need the final score and it's not updating - User explicitly asks for "latest" or "live" information that APIs don't have - User mentions "research" or "research-mcp" - the tool research_latest_score IS the research tool they're referring to 9. IMPORTANT: If a user mentions "research-mcp" or asks to use "research", you should use the research_latest_score tool. That IS the research tool available to you. DO NOT say you don't have access to research - you do have the research_latest_score tool. 10. When you use research_latest_score, the tool will search the web and return results. IMPORTANT: When research_latest_score returns data (even if it says "no evidence found" or similar), you HAVE successfully performed research. Always acknowledge that research was performed and present the findings, even if they indicate no game was found. DO NOT say "I don't have access" - you DO have access via research_latest_score. 11. CRITICAL: When research_latest_score returns ANY data (check for "research_performed", "status", or "research_results" keys), research WAS performed. You MUST acknowledge this. Say "I searched the web" or "I performed research" and present the findings. NEVER say "I can't access" or "I don't have access" when research_latest_score returns data - that means research was successful. 12. If the research results say "no evidence" or "no game found", you should say: "I searched for [game] but didn't find evidence of a game happening today" - this shows you DID perform research, just didn't find results. When calling tools, provide the arguments as a JSON object. For example, to call get_game_odds_and_score for Miami, use: {"team1": "Miami"}. Use these tools to answer user questions about college football. Be conversational and helpful. When you retrieve data, present it in a clear, readable format.""" try: # Use Chat Completions API for GPT-5 (works better with tool calling than Responses API) # Build messages for conversation messages = [ {"role": "system", "content": instructions}, {"role": "user", "content": message} ] # Tool-calling loop (max 3 iterations to prevent infinite loops) max_iterations = 3 assistant_message = None for iteration in range(max_iterations): # Make API call with Chat Completions API response = await openai_client.chat.completions.create( model="gpt-5", messages=messages, tools=MCP_TOOLS, tool_choice="auto" ) assistant_message = response.choices[0].message # Build assistant message dict assistant_msg = { "role": "assistant", "content": assistant_message.content } # Add tool_calls if present if assistant_message.tool_calls: assistant_msg["tool_calls"] = [ { "id": tc.id, "type": tc.type, "function": { "name": tc.function.name, "arguments": tc.function.arguments } } for tc in assistant_message.tool_calls ] messages.append(assistant_msg) # If no tool calls, we're done if not assistant_message.tool_calls: return assistant_message.content or "I apologize, but I couldn't generate a response." 
# Execute tool calls tool_results = [] for tool_call in assistant_message.tool_calls: function_name = tool_call.function.name try: arguments = json.loads(tool_call.function.arguments) except json.JSONDecodeError: arguments = {} logger.info(f"Calling tool: {function_name} with args: {arguments}") result = await call_mcp_function(function_name, arguments) # Handle error results - include helpful context for the LLM if result and isinstance(result, dict) and "error" in result: error_msg = result.get("error", "Unknown error") logger.warning(f"Tool {function_name} returned error: {error_msg}") # Still include the error in the tool result so LLM can respond appropriately tool_results.append({ "tool_call_id": tool_call.id, "role": "tool", "name": function_name, "content": json.dumps(result) if result else json.dumps({"error": "Function call failed"}) }) # Add tool results to messages messages.extend(tool_results) # If we've exhausted iterations and still have tool calls, make final call # This ensures we get a text response even if the loop ended with tool calls if assistant_message and assistant_message.tool_calls: # We ended with tool calls, make one more call to get the final response try: logger.info("Making final API call to generate response after tool calls") final_response = await openai_client.chat.completions.create( model="gpt-5", messages=messages, tools=MCP_TOOLS, tool_choice="none" # Force no more tool calls, just get the response ) final_message = final_response.choices[0].message if final_message.content and len(final_message.content.strip()) > 0: logger.info(f"Final response generated: {len(final_message.content)} chars") return final_message.content else: logger.warning("Final response call returned empty content, generating fallback") # Generate response from tool results tool_results_data = [] for msg in messages: if msg.get("role") == "tool" and msg.get("content"): try: tool_result = json.loads(msg.get("content", "{}")) if tool_result and isinstance(tool_result, dict) and "error" not in str(tool_result).lower(): tool_results_data.append(tool_result) except (json.JSONDecodeError, TypeError): pass if tool_results_data: # Create a comprehensive prompt with all tool results results_summary = json.dumps(tool_results_data, indent=2) summary_prompt = f"""Based on the following college football data, provide a helpful and informative response to the user's question: "{message}" Data retrieved: {results_summary[:3000]} Please provide a clear, concise answer based on this data.""" summary_response = await openai_client.chat.completions.create( model="gpt-5", messages=[ {"role": "system", "content": instructions}, {"role": "user", "content": summary_prompt} ] ) summary_content = summary_response.choices[0].message.content if summary_content and len(summary_content.strip()) > 0: logger.info(f"Fallback response generated: {len(summary_content)} chars") return summary_content # Last resort: return a generic message with data hint return "I've retrieved the information you requested, but couldn't format a complete response. Please try rephrasing your question." except Exception as e: logger.error(f"Error in final response call: {e}") import traceback logger.error(traceback.format_exc()) # If we have content, return it if assistant_message and assistant_message.content: return assistant_message.content # Last resort fallback return "I've gathered the information, but couldn't complete the response. Please try rephrasing your question." 
except Exception as e: logger.error(f"Error in run_agent: {e}") import traceback logger.error(traceback.format_exc()) return f"I encountered an error while processing your request: {str(e)}" @app.post("/chat", response_model=ChatResponse) async def chat( req: ChatRequest, authorization: Optional[str] = Header(None) ): """ Chat endpoint for user messages. Handles authentication, thread management, and agent execution. """ # Simple bearer token auth if not authorization or not authorization.startswith("Bearer "): raise HTTPException(status_code=401, detail="Missing Bearer token") token = authorization.split(" ", 1)[1].strip() if token != APP_TOKEN: raise HTTPException(status_code=403, detail="Invalid token") # Generate or use existing thread ID thread_id = req.thread_id or str(uuid.uuid4()) # Run agent (async) reply = await run_agent(thread_id, req.message.strip()) return ChatResponse(reply=reply, thread_id=thread_id) @app.get("/health") async def health(): """Health check endpoint""" return { "ok": True, "service": "agent-service", "mcp_server_url": MCP_SERVER_URL } if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000)
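For orientation, here is a minimal client sketch against the /chat endpoint defined above. It is not part of the repository: it assumes the agent service is running locally on port 8000 (as in the __main__ block), that APP_TOKEN matches the server's environment variable, and that the `requests` package is installed; the example message is illustrative.

# Minimal sketch of calling the agent service's /chat endpoint (assumptions noted above).
import os
import requests

APP_TOKEN = os.environ["APP_TOKEN"]  # must match the server's APP_TOKEN

resp = requests.post(
    "http://localhost:8000/chat",
    headers={"Authorization": f"Bearer {APP_TOKEN}"},
    # thread_id is optional; the server generates one if omitted
    json={"message": "Did Miami win today?"},
    timeout=120,
)
resp.raise_for_status()
data = resp.json()
print(data["reply"])      # agent's answer
print(data["thread_id"])  # reuse this ID to continue the conversation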


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/gedin-eth/cfb-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.