llm-api-benchmark-mcp-server by Yoosu-L
pyproject.toml (643 B)
[project]
name = "llm-api-benchmark-mcp-server"
version = "0.1.4"
description = "Runs a throughput benchmark for LLM APIs, measuring generation throughput, prompt throughput, and Time To First Token (TTFT) under various concurrency levels."
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "mcp[cli]>=1.9.3",
    "numpy>=2.3.1",
    "openai>=1.90.0",
    "requests>=2.32.4",
]

[project.scripts]
llm-api-benchmark-mcp-server = "llm_api_benchmark_mcp_server.main:main"

[tool.pytest.ini_options]
pythonpath = "src"

[dependency-groups]
dev = [
    "pytest>=8.4.1",
    "pytest-asyncio>=1.0.0",
    "respx>=0.22.0",
]
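The [project.scripts] entry maps the llm-api-benchmark-mcp-server console command to a main() function in the llm_api_benchmark_mcp_server.main module, and pythonpath = "src" in the pytest config implies a src layout. A minimal sketch of what that entry point could look like, built on the FastMCP helper from the mcp dependency; the tool name, its parameters, and the TTFT logic below are illustrative assumptions, not code taken from this repository:

# Hypothetical sketch of src/llm_api_benchmark_mcp_server/main.py.
# Only the main() entry point is implied by [project.scripts]; the tool
# name and its signature are assumptions for illustration.
import time

from mcp.server.fastmcp import FastMCP
from openai import OpenAI

mcp = FastMCP("llm-api-benchmark-mcp-server")

@mcp.tool()
def measure_ttft(base_url: str, api_key: str, model: str, prompt: str) -> float:
    """Hypothetical tool: seconds until the first streamed chunk arrives
    from an OpenAI-compatible chat completions endpoint."""
    client = OpenAI(base_url=base_url, api_key=api_key)
    start = time.perf_counter()
    stream = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    for _ in stream:
        # The first streamed event approximates time to first token.
        return time.perf_counter() - start
    return -1.0  # no chunks received

def main() -> None:
    mcp.run()  # serves over stdio by default

if __name__ == "__main__":
    main()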


MCP directory API

All the information about MCP servers in the directory is available via our MCP API. For example, to fetch this server's entry:

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Yoosu-L/llm-api-benchmark-mcp-server'
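The same endpoint can be queried from Python using the requests library that already appears in the dependency list above. The response schema is not documented here, so this sketch only assumes the body is JSON and pretty-prints it:

# Sketch: fetch this server's directory entry and pretty-print the JSON.
import json

import requests

url = "https://glama.ai/api/mcp/v1/servers/Yoosu-L/llm-api-benchmark-mcp-server"
resp = requests.get(url, timeout=10)
resp.raise_for_status()
print(json.dumps(resp.json(), indent=2))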

If you have feedback or need assistance with the MCP directory API, please join our Discord server.