
cricket-mcp-server

get_live_commentary

Fetch recent live cricket commentary events for any Cricbuzz match by providing the match URL and specifying the number of items to retrieve.

Instructions

Get recent live commentary events for a Cricbuzz match.

Args:
    match_url (str): Cricbuzz match URL. Can be a general match page; the commentary tab will be resolved automatically.
    limit (int): Maximum number of recent commentary items to return. Defaults to 20.

Returns:
    dict: {"title": str, "commentary_url": str, "events": [{"text": str}]}
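
For illustration, a successful result has the shape below; the title, URL, event text, and event code are hypothetical values, and the optional "event" and "ball" fields appear only when the Cricbuzz JSON API supplies them:

{
  "title": "3rd ODI - Live",
  "commentary_url": "https://www.cricbuzz.com/live-cricket-scores/12345/example-match",
  "events": [
    {"text": "Good length on off stump, defended back down the pitch", "ball": 1},
    {"text": "FOUR! Short and wide, slashed away past point", "event": "FOUR", "ball": 2}
  ]
}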

Input Schema

Name       | Required | Description                                                        | Default
limit      | No       | Maximum number of recent commentary items to return                | 20
match_url  | Yes      | Cricbuzz match URL; the commentary tab is resolved automatically   | —
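
Over MCP, a client invokes the tool with a standard tools/call request. A sketch of that request follows; the match URL is a placeholder, not a real match:

{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "get_live_commentary",
    "arguments": {
      "match_url": "https://www.cricbuzz.com/live-cricket-scores/12345/example-match",
      "limit": 5
    }
  }
}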

Implementation Reference

  • The main handler for the get_live_commentary tool. It is decorated with @mcp.tool() so that FastMCP registers it automatically. The function fetches live commentary from the Cricbuzz JSON API first, falling back to HTML scraping if that fails; it extracts the match title and commentary events, and handles errors gracefully.
    def get_live_commentary(match_url: str, limit: int = 20) -> dict:
        """
        Get recent live commentary events for a Cricbuzz match.

        Args:
            match_url (str): Cricbuzz match URL. Can be a general match page;
                the commentary tab will be resolved automatically.
            limit (int): Maximum number of recent commentary items to return.

        Returns:
            dict: {"title": str, "commentary_url": str, "events": [{"text": str}]}
        """
        if not match_url or "cricbuzz.com" not in match_url:
            return {"error": "A valid Cricbuzz match URL is required."}

        # Try official JSON commentary API first (more reliable than HTML scraping)
        match_id_match = re.search(r"/(\d{5,7})/", match_url)
        if not match_id_match:
            return {"error": "Could not extract match id from URL."}
        match_id = match_id_match.group(1)
        api_url = f"https://www.cricbuzz.com/api/cricket-match/commentary/{match_id}"
        try:
            resp = requests.get(api_url, headers=HEADERS, timeout=15)
            resp.raise_for_status()
            data = resp.json()
        except Exception:
            data = None

        def _clean_comm_text(text: str) -> str:
            # Remove formatting markers like "B0$", "I0$" etc.
            t = re.sub(r"[A-Z]\d\$", "", text)
            # Normalize whitespace
            t = re.sub(r"\s+", " ", t).strip()
            return t

        if isinstance(data, dict) and data.get("commentaryList"):
            header = data.get("matchHeader", {})
            miniscore = data.get("miniscore", {})
            title_parts = []
            if header.get("matchDescription"):
                title_parts.append(header.get("matchDescription"))
            if header.get("status"):
                title_parts.append(header.get("status"))
            title = " - ".join(title_parts) if title_parts else None
            events = []
            for item in data.get("commentaryList", [])[: max(0, limit)]:
                text = _clean_comm_text(str(item.get("commText", "")).strip())
                if not text:
                    continue
                ev = {"text": text}
                if item.get("event"):
                    ev["event"] = item.get("event")
                if item.get("ballNbr") is not None:
                    ev["ball"] = item.get("ballNbr")
                events.append(ev)
            return {
                "title": title,
                "commentary_url": match_url,
                "events": events,
            }

        # If JSON API fails, fall back to HTML scraping heuristics
        def _fetch(url: str) -> BeautifulSoup | None:
            try:
                resp = requests.get(url, headers=HEADERS, timeout=15)
                resp.raise_for_status()
                return BeautifulSoup(resp.text, "lxml")
            except Exception:
                return None

        page = _fetch(match_url)
        if not page:
            return {"error": "Failed to load match page. The match might not be live or the URL may be incorrect."}

        commentary_url = None
        try:
            nav = page.find("div", class_=re.compile("cb-nav-pills"))
            if nav:
                for a in nav.find_all("a", href=True):
                    if "commentary" in a.get("href", "").lower() or (a.text and "commentary" in a.text.lower()):
                        commentary_url = ("https://www.cricbuzz.com" + a["href"]) if a["href"].startswith("/") else a["href"]
                        break
        except Exception:
            pass
        if not commentary_url:
            commentary_url = match_url.rstrip("/") + "/commentary"

        cpage = _fetch(commentary_url)
        if not cpage:
            return {"error": "Failed to load commentary page. This match may not have live commentary available."}

        result: dict = {"title": None, "commentary_url": commentary_url, "events": []}
        title_tag = cpage.find("h1", class_=re.compile("cb-nav-hdr"))
        if title_tag:
            result["title"] = title_tag.text.strip()

        candidates = []
        candidates.extend(cpage.find_all("div", class_=re.compile(r"cb-col\s+cb-col-90\s+cb-com-ln")))
        if not candidates:
            lst = cpage.find("div", class_=re.compile("cb-com-lst"))
            if lst:
                candidates.extend(lst.find_all("div", class_=re.compile(r"cb-col\s+cb-col-90")))
        if not candidates:
            candidates.extend(cpage.find_all("p", class_=re.compile("cb-com-ln")))
        if not candidates:
            candidates.extend(cpage.find_all("div", class_=re.compile("cb-com-ln")))
        if not candidates:
            for div in cpage.find_all("div"):
                text = div.get_text(" ", strip=True)
                if text and len(text) > 20 and ("ball" in text.lower() or "over" in text.lower() or "wicket" in text.lower()):
                    candidates.append(div)

        events = []
        for node in candidates:
            try:
                text = node.get_text(" ", strip=True)
                if not text:
                    continue
                if text.lower().startswith("commentary"):
                    continue
                if len(text) < 10:
                    continue
                events.append({"text": text})
                if len(events) >= limit:
                    break
            except Exception:
                continue

        result["events"] = events
        if not events:
            result["note"] = "No commentary items found. This match may not be live or commentary may not be available."
            try:
                match_info = get_match_details(match_url)
                if "error" not in match_info:
                    result["fallback"] = "Commentary not available, but here's the match details:"
                    result["match_details"] = match_info
            except Exception:
                pass
        return result
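
The excerpt references several module-level names defined elsewhere in the server: re, requests, BeautifulSoup, HEADERS, the mcp FastMCP instance, and the get_match_details tool. A minimal sketch of that surrounding setup, assuming the standalone fastmcp package and a browser-style User-Agent (both assumptions, not copied from the repository):

import re

import requests
from bs4 import BeautifulSoup
from fastmcp import FastMCP  # assumed import path; the official SDK uses mcp.server.fastmcp

mcp = FastMCP("cricket-mcp-server")

# Assumption: Cricbuzz tends to reject requests without a browser-like User-Agent.
HEADERS = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"}

@mcp.tool()
def get_live_commentary(match_url: str, limit: int = 20) -> dict:
    ...  # body as shown above

if __name__ == "__main__":
    mcp.run()  # serves the tool over stdio by default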

MCP directory API

We provide all the information about MCP servers via our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/tarun7r/cricket-mcp-server'
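
The same endpoint can be queried from Python; no particular response fields are assumed here:

import requests

resp = requests.get(
    "https://glama.ai/api/mcp/v1/servers/tarun7r/cricket-mcp-server",
    timeout=15,
)
resp.raise_for_status()
print(resp.json())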

If you have feedback or need assistance with the MCP directory API, please join our Discord server.