Skip to main content
Glama
twelvedata

Twelve Data MCP Server

Official
by twelvedata

GetTimeSeriesAtr

Retrieve Average True Range (ATR) time series data to measure market volatility for financial instruments like stocks, forex, and cryptocurrencies.

Instructions

The Average True Range (ATR) is a volatility indicator that measures the average range of price movement over a specified period, helping traders assess market volatility.

Input Schema

Table / JSON Schema

| Name   | Required | Description | Default |
|--------|----------|-------------|---------|
| params | Yes      |             |         |

Implementation Reference

  • Core execution logic for all MCP tools, including GetTimeSeriesAtr. Makes HTTP request to Twelve Data API, handles API key injection, path param resolution, error handling, and response validation using Pydantic model.
    async def _call_endpoint( endpoint: str, params: P, response_model: Type[R], ctx: Context ) -> R: params.apikey = extract_twelve_data_apikey( twelve_data_apikey=twelve_data_apikey, transport=transport, ctx=ctx ) params_dict = params.model_dump(exclude_none=True) resolved_endpoint = resolve_path_params(endpoint, params_dict) async with httpx.AsyncClient( trust_env=False, headers={ "accept": "application/json", "user-agent": "python-httpx/0.24.0" }, ) as client: resp = await client.get( f"{api_base}/{resolved_endpoint}", params=params_dict ) resp.raise_for_status() resp_json = resp.json() if isinstance(resp_json, dict): status = resp_json.get("status") if status == "error": code = resp_json.get('code') raise HTTPException( status_code=code, detail=f"Failed to perform request," f" code = {code}, message = {resp_json.get('message')}" ) return response_model.model_validate(resp_json)
  • Calls register_all_tools which registers all tools (including GetTimeSeriesAtr) with the FastMCP server when local vector DB exists.
    if vector_db_exists(): register_all_tools(server=server, _call_endpoint=_call_endpoint) u_tool = register_u_tool(
  • Generates the tools.py module containing register_all_tools function, which defines thin wrapper handlers for each Twelve Data API operationId as MCP tools (e.g., @server.tool(name="GetTimeSeriesAtr")).
    def generate_code(ops): def fix_case(name: str) -> str: return name[0].upper() + name[1:] if name.lower().startswith("advanced") else name lines = [ 'from mcp.server import FastMCP', 'from mcp.server.fastmcp import Context', '' ] # Import request models for op, _, _ in ops: lines.append(f'from .request_models import {fix_case(op)}Request') lines.append('') # Import response models for op, _, _ in ops: lines.append(f'from .response_models import {fix_case(op)}200Response') lines.append('') # Register tools lines.append('def register_all_tools(server: FastMCP, _call_endpoint):') for op, desc, key in ops: fixed_op = fix_case(op) lines += [ f' @server.tool(name="{op}",', f' description="{desc}")', f' async def {op}(params: {fixed_op}Request, ctx: Context) -> {fixed_op}200Response:', f' return await _call_endpoint("{key}", params, {fixed_op}200Response, ctx)', '' ] return '\n'.join(lines)
  • Generates Pydantic request models (e.g., GetTimeSeriesAtrRequest) for input validation/schemas from OpenAPI spec for all tools including GetTimeSeriesAtr.
def main():
    """Generate Pydantic request models from the OpenAPI specification.

    Walks every operation in the spec, collects its parameters and JSON body
    properties into a ``props`` mapping, applies tool-specific overrides
    (``outputsize``, ``apikey``, ``interval``), renders one ``*Request``
    model class per operationId via ``gen_class``, and writes all models to
    ``REQUESTS_FILE``.
    """
    # Load the OpenAPI specification
    with open(OPENAPI_PATH, "r", encoding="utf-8") as f:
        spec = json.load(f)
    components = spec.get("components", {}).get("schemas", {})
    request_models: List[str] = []
    request_names: set = set()  # dedupe: first occurrence of a class name wins
    for path, methods in spec.get("paths", {}).items():
        for http_method, op in methods.items():
            opid = op.get("operationId")
            if not opid:
                # Operations without an operationId cannot be named — skip.
                continue
            class_name = canonical_class_name(opid, "Request")
            # Collect parameters from path, query, header, etc.
            props: dict = {}
            for param in op.get("parameters", []):
                name = param["name"]
                sch = param.get("schema", {"type": "string"})
                typ = python_type(sch, components)
                required = param.get("required", False)
                # Prefer the parameter-level description over the schema's.
                desc = param.get("description") or sch.get("description")
                examples = collect_examples(param, sch)
                default = sch.get("default", None)
                props[name] = {
                    "type": typ,
                    "required": required,
                    "description": desc,
                    "examples": examples,
                    "default": default,
                }
            # Collect JSON body properties
            body = op.get("requestBody", {}) \
                .get("content", {}) \
                .get("application/json", {}) \
                .get("schema")
            if body:
                body_sch = resolve_schema(body, components)
                # NOTE: body properties overwrite same-named parameters above.
                for name, sch in body_sch.get("properties", {}).items():
                    typ = python_type(sch, components)
                    required = name in body_sch.get("required", [])
                    desc = sch.get("description")
                    examples = collect_examples({}, sch)
                    default = sch.get("default", None)
                    props[name] = {
                        "type": typ,
                        "required": required,
                        "description": desc,
                        "examples": examples,
                        "default": default,
                    }
            # Ensure every tool exposes `outputsize` with a default of 10;
            # existing descriptions advertising "Default `30`" are rewritten.
            if "outputsize" not in props:
                props["outputsize"] = {
                    "type": "int",
                    "required": False,
                    "description": (
                        "Number of data points to retrieve. Supports values in the range from `1` to `5000`. "
                        "Default `10` when no date parameters are set, otherwise set to maximum"
                    ),
                    "examples": [10],
                    "default": 10,
                }
            else:
                props["outputsize"]["default"] = 10
                props["outputsize"]["description"] = props["outputsize"]["description"].replace(
                    'Default `30`', 'Default `10`'
                )
                props["outputsize"]["examples"] = [10]
            # Add apikey with default="demo"
            props["apikey"] = {
                "type": "str",
                "required": False,
                "description": "API key",
                "examples": ["demo"],
                "default": "demo",
            }
            # `interval` becomes optional with a sensible default.
            if "interval" in props:
                props["interval"]["required"] = False
                props["interval"]["default"] = "1day"
            # Append plan availability to the description if x-starting-plan is present
            starting_plan = op.get("x-starting-plan")
            description = op.get("description", "")
            if starting_plan:
                addon = f" Available starting from the `{starting_plan}` plan."
                description = (description or "") + addon
            code = gen_class(class_name, props, description)
            if class_name not in request_names:
                request_models.append(code)
                request_names.add(class_name)
    # Write all generated models to the target file
    header = (
        "from pydantic import BaseModel, Field\n"
        "from typing import Any, List, Optional\n\n"
    )
    Path(REQUESTS_FILE).write_text(header + "\n\n".join(request_models), encoding="utf-8")
    print(f"Generated request models: {REQUESTS_FILE}")
  • Generates Pydantic response models (e.g., GetTimeSeriesAtr200Response) for output validation from OpenAPI spec.
    import subprocess from pathlib import Path openapi_path = '../extra/openapi_clean.json' output_path = '../data/response_models.py' cmd = [ 'datamodel-codegen', '--input', str(openapi_path), '--input-file-type', 'openapi', '--output', str(output_path), '--output-model-type', 'pydantic_v2.BaseModel', '--reuse-model', '--use-title-as-name', '--disable-timestamp', '--field-constraints', '--use-double-quotes', ] subprocess.run(cmd, check=True) # Append aliases alias_lines = [ '', '# Aliases for response models', 'GetMarketMovers200Response = MarketMoversResponseBody', 'GetTimeSeriesPercent_B200Response = GetTimeSeriesPercentB200Response', '' ] with open(output_path, 'a', encoding='utf-8') as f: f.write('\n'.join(alias_lines)) print(f"[SUCCESS] Models generated using CLI and aliases added: {output_path}")

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/twelvedata/mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.