get_world_news
Retrieve a multi-source snapshot of world headlines, giving a broad view of global news drawn from multiple sources.
Instructions
Fetch a multi-source snapshot of world headlines.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| _No arguments_ | | | |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | Flat list of headline entries, each a dict with `source`, `title`, `summary`, and `link`. | — |
Implementation Reference
- src/friday_mcp_server/tools/web.py:65-70 (handler)The 'get_world_news' tool handler: fetches world headlines concurrently from BBC, NYT, and Al Jazeera RSS feeds, returning a flat list of dicts with source, title, summary, and link.
@mcp.tool()
async def get_world_news() -> list[dict[str, str]]:
    """Fetch a multi-source snapshot of world headlines."""
    # One shared HTTP client; all seed feeds are fetched concurrently.
    async with httpx.AsyncClient(follow_redirects=True, timeout=15.0) as client:
        # NOTE(review): gather() without return_exceptions=True means a single
        # failing feed aborts the whole call — confirm the all-or-nothing
        # behavior is intended.
        results = await asyncio.gather(*[_fetch_feed(client, url) for url in SEED_FEEDS])
        # Flatten the per-feed lists into one list of headline dicts.
        return [item for group in results for item in group]
- SEED_FEEDS list defining the RSS sources (BBC World, NYT World, Al Jazeera) that 'get_world_news' fetches from.
# RSS sources polled by 'get_world_news': BBC World, NYT World, Al Jazeera.
SEED_FEEDS = [
    "https://feeds.bbci.co.uk/news/world/rss.xml",
    "https://rss.nytimes.com/services/xml/rss/nyt/World.xml",
    "https://www.aljazeera.com/xml/rss/all.xml",
]
- Helper function '_fetch_feed' that parses an RSS feed XML, extracts up to 4 items with title, summary (HTML stripped), link, and source domain.
async def _fetch_feed(client: httpx.AsyncClient, url: str) -> list[dict[str, str]]:
    """Fetch one RSS feed and return up to 4 items as source/title/summary/link dicts.

    Raises httpx.HTTPStatusError on a non-2xx response (raise_for_status) and
    xml.etree.ElementTree.ParseError on malformed feed XML.
    """
    response = await client.get(url, headers={"User-Agent": "Friday-MCP-Server/0.1"})
    response.raise_for_status()
    root = ET.fromstring(response.content)
    feed_items: list[dict[str, str]] = []
    # Cap at the first 4 <item> elements of the feed.
    for item in root.findall(".//item")[:4]:
        description = item.findtext("description") or ""
        feed_items.append(
            {
                # Hostname portion of the feed URL, e.g. "feeds.bbci.co.uk".
                "source": url.split("/")[2],
                "title": item.findtext("title") or "Untitled",
                # Strip HTML tags from the summary with a simple tag regex.
                "summary": re.sub(r"<[^>]+>", "", description).strip(),
                "link": item.findtext("link") or "",
            }
        )
    return feed_items
- src/friday_mcp_server/tools/web.py:37-70 (registration)The 'register' function that registers all web tools (search_web, fetch_url, get_world_news) on the MCP server instance via @mcp.tool() decorators.
def register(mcp, *, config) -> None:
    """Register the web tools (search_web, fetch_url, get_world_news) on *mcp*."""

    @mcp.tool()
    async def search_web(query: str, max_results: int = 5) -> list[dict[str, str]]:
        """Search the web with DuckDuckGo and return the top results."""

        # DDGS is synchronous, so the search runs in a worker thread to
        # keep the event loop unblocked.
        def _search() -> list[dict[str, str]]:
            from duckduckgo_search import DDGS

            with DDGS() as ddgs:
                return [
                    {
                        "title": result.get("title", ""),
                        "snippet": result.get("body", ""),
                        "url": result.get("href", ""),
                    }
                    for result in ddgs.text(query, max_results=max_results)
                ]

        return await asyncio.to_thread(_search)

    @mcp.tool()
    async def fetch_url(url: str) -> str:
        """Fetch the raw text of a URL, truncated to the configured limit."""
        async with httpx.AsyncClient(follow_redirects=True, timeout=15.0) as client:
            response = await client.get(url)
            response.raise_for_status()
            # Truncate to the configured character budget.
            return response.text[: config.max_fetch_chars]

    @mcp.tool()
    async def get_world_news() -> list[dict[str, str]]:
        """Fetch a multi-source snapshot of world headlines."""
        # All seed feeds are fetched concurrently over one shared client;
        # the per-feed result lists are then flattened into a single list.
        async with httpx.AsyncClient(follow_redirects=True, timeout=15.0) as client:
            results = await asyncio.gather(*[_fetch_feed(client, url) for url in SEED_FEEDS])
            return [item for group in results for item in group]
- src/friday_mcp_server/tools/__init__.py:6-11 (registration)Tool registry module that calls 'web.register(mcp, config=config)' to register the web tools (including get_world_news) on the MCP server.
def register_all_tools(mcp, *, config, skill_store) -> None:
    """Register every tool module (system, utils, web, workspace, skills) on *mcp*."""
    system.register(mcp, config=config)
    utils.register(mcp)
    web.register(mcp, config=config)
    workspace.register(mcp, config=config)
    skills.register(mcp, skill_store=skill_store)