get_related_tickers
Find stocks related to a given ticker by industry or peer group. Input a stock symbol to discover similar companies.
Instructions
Tickers related to the given symbol (similar industry/peers).
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| ticker | Yes | Stock symbol. |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| No arguments | | | |
Implementation Reference
- src/massive_mcp/tools/tickers.py:48-55 (handler): The actual handler function for the 'get_related_tickers' tool. Calls client.get('/v1/related-companies/{ticker}').
@mcp.tool() async def get_related_tickers(ticker: str) -> dict[str, Any]: """Tickers related to the given symbol (similar industry/peers). Args: ticker: Stock symbol. """ return await client.get(f"/v1/related-companies/{ticker}") - src/massive_mcp/tools/tickers.py:10-48 (registration)The 'register' function in this module uses the @mcp.tool() decorator to register the get_related_tickers function as an MCP tool.
def register(mcp: FastMCP, client: MassiveClient) -> None:
    # Registers this module's ticker tools on the shared FastMCP server.
    # Each closure captures `client` for making the underlying API calls.
    @mcp.tool()
    async def list_tickers(
        search: str | None = None,
        market: str | None = "stocks",
        active: bool = True,
        limit: int = 50,
        cursor: str | None = None,
    ) -> dict[str, Any]:
        """Search/list tickers in the Massive reference universe.

        Args:
            search: Free-text search (e.g. "apple").
            market: "stocks", "otc", "crypto", "fx", "indices". Default "stocks".
            active: Only currently-active tickers. Default true.
            limit: Max rows. Default 50.
            cursor: Pagination cursor.
        """
        # None-valued params are dropped by MassiveClient.get before sending.
        return await client.get(
            "/v3/reference/tickers",
            {
                "search": search,
                "market": market,
                # API expects a lowercase string ("true"/"false"), not a bool.
                "active": str(active).lower(),
                "limit": limit,
                "cursor": cursor,
            },
        )

    @mcp.tool()
    async def get_ticker_overview(ticker: str) -> dict[str, Any]:
        """Reference details for a ticker: name, exchange, sector, market cap, description, etc.

        Args:
            ticker: Stock symbol.
        """
        return await client.get(f"/v3/reference/tickers/{ticker}")

    # NOTE: excerpt is truncated here — this dangling decorator belongs to the
    # get_related_tickers definition shown in the handler snippet above.
    @mcp.tool()
- src/massive_mcp/server.py:36-50 (registration)The server iterates over tool modules and calls 'register(mcp, client)', which registers all tools including get_related_tickers.
# Excerpt from server setup: fan out registration across all tool modules.
for module in (
    aggregates,
    quotes,
    snapshots,
    tickers,
    news,
    reference,
    indicators,
    corporate,
    financials,
):
    # Every tool module exposes a register(mcp, client) hook that attaches
    # its @mcp.tool() functions to the shared server instance.
    module.register(mcp, client)

return mcp, client
async def get(
    self, path: str, params: dict[str, Any] | None = None, *, trim: bool = True
) -> dict[str, Any]:
    """GET *path* from the Massive API with retries, returning the parsed JSON body.

    Args:
        path: API path (e.g. "/v3/reference/tickers").
        params: Query parameters; entries with a None value are dropped.
        trim: When true, pass the response through _trim() to truncate
            oversized result arrays and surface a pagination cursor.

    Raises:
        MassiveAPIError: on 401 (with an auth-mode hint), on any other
            non-success status, or after MAX_RETRIES failed attempts.
    """
    # Drop None params so optional arguments don't appear in the query string.
    merged: dict[str, Any] = {k: v for k, v in (params or {}).items() if v is not None}
    if self._settings.auth_mode == "query":
        # Query-string auth mode: append the API key as a parameter.
        merged["apiKey"] = self._settings.api_key
    last_exc: Exception | None = None
    for attempt in range(MAX_RETRIES):
        try:
            resp = await self._http.get(path, params=merged)
        except httpx.HTTPError as exc:
            # Transport-level failure: exponential backoff (1s, 2s, 4s, ...).
            last_exc = exc
            await asyncio.sleep(2**attempt)
            continue
        if resp.status_code == 429:
            # Rate-limited: honor Retry-After if present, capped at 30s.
            retry_after = float(resp.headers.get("Retry-After", 2**attempt))
            await asyncio.sleep(min(retry_after, 30))
            continue
        if 500 <= resp.status_code < 600 and attempt < MAX_RETRIES - 1:
            # Server error: retry with backoff, except on the final attempt
            # (which falls through to the generic error path below).
            await asyncio.sleep(2**attempt)
            continue
        if resp.status_code == 401:
            hint = (
                "auth rejected — verify MASSIVE_API_KEY; "
                "if you used MASSIVE_AUTH_MODE=bearer, try 'query' (or vice versa)"
            )
            raise MassiveAPIError(401, hint, _strip_secrets(str(resp.request.url)))
        try:
            data = resp.json()
        except ValueError:
            # Non-JSON body: preserve raw text so the caller still sees it.
            data = {"raw": resp.text}
        if not resp.is_success:
            # Prefer the API's own error/message fields; fall back to the
            # HTTP reason phrase. URL is scrubbed of secrets before raising.
            msg = data.get("error") or data.get("message") or resp.reason_phrase or "request failed"
            raise MassiveAPIError(resp.status_code, str(msg), _strip_secrets(str(resp.request.url)))
        return _trim(data) if trim else data
    # NOTE(review): if retries were exhausted by 429/5xx responses (not
    # transport errors), last_exc is None and this message reads
    # "... retries: None" — consider recording the last HTTP status too.
    raise MassiveAPIError(0, f"network error after {MAX_RETRIES} retries: {last_exc}", path)
- src/massive_mcp/client.py:92-115 (helper)The _trim helper truncates large result arrays and extracts pagination cursors from responses.
def _trim(data: dict[str, Any]) -> dict[str, Any]: """If `results` is a huge array, truncate and surface a hint to paginate.""" results = data.get("results") if isinstance(results, list) and len(results) > TRIM_THRESHOLD: kept = results[:TRIM_THRESHOLD] data = dict(data) data["results"] = kept data["_truncated_note"] = ( f"response had {len(results)} items; truncated to {TRIM_THRESHOLD}. " "Re-call with a tighter `limit` or use `cursor`/`next_url` to page." ) if "next_url" in data and data.get("next_url"): cursor = _extract_cursor(data["next_url"]) if cursor: data["next_cursor"] = cursor return data def _extract_cursor(next_url: str) -> str | None: parts = urlsplit(next_url) for kv in parts.query.split("&"): if kv.startswith("cursor="): return kv.split("=", 1)[1] return None