// llm_mcp_config.json5
// The configuration file format is [JSON5](https://json5.org/),
// where comments and trailing commas are allowed.
// The format is further extended to replace `${...}` notation
// with the values of the corresponding environment variables.
// Keep all credentials and other private values in the `.env` file,
// and refer to them with the `${...}` notation as needed.
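//
// For example, given a `.env` file containing (placeholder value):
//
//   GITHUB_PERSONAL_ACCESS_TOKEN=github_pat_...
//
// a header value written as "Bearer ${GITHUB_PERSONAL_ACCESS_TOKEN}"
// is expanded to "Bearer github_pat_..." when this file is loaded.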
{
"llm": {
"provider": "openai", "model": "gpt-5-mini"
// "provider": "anthropic", "model": "claude-3-5-haiku-latest"
// "provider": "google_genai", "model": "gemini-2.5-flash"
// "provider": "xai", "model": "grok-3-mini"
// "provider": "cerebras", "model": "gpt-oss-120b"
// "provider": "groq", "model": "openai/gpt-oss-20b"
},
"example_queries": [
"Summarize the beginning of the news headlines on BBC.com.",
"Read and briefly summarize the LICENSE file",
"Are there any weather alerts in California?",
// "Tell me about my GitHub profile"",
// "What's the news from Tokyo today?",
// "Open the webpage at bbc.com",
// "Tell me about my Notion account",
],
"mcp_servers": {
// https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"."
]
},
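// // The positional args above list the directories the server may access;
// // append more to expose additional paths (hypothetical example):
// // "args": [ "-y", "@modelcontextprotocol/server-filesystem", ".", "/another/allowed/dir" ]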
// https://github.com/modelcontextprotocol/servers/tree/main/src/fetch
"fetch": {
"command": "uvx",
"args": [
"mcp-server-fetch"
]
},
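// // (`uvx` ships with [uv](https://docs.astral.sh/uv/); it fetches and runs
// // `mcp-server-fetch` in a temporary environment, so no separate install is needed)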
// https://github.com/modelcontextprotocol/quickstart-resources/tree/main/weather-server-python
"us-weather": { // US weather only
"command": "npx",
"args": [
"-y",
"@h1deya/mcp-server-weather"
]
},
// // Auto-detection: tries Streamable HTTP first, falls back to SSE
// "auto-detect-server": {
// "url": "http://${SERVER_HOST}:${SERVER_PORT}/..."
// },
// // WebSocket
// "ws-server-name": {
// "url": "ws://${WS_SERVER_HOST}:${WS_SERVER_PORT}/..."
// },
// // Test an SSE connection with the auto fallback
// // See the comments at the top of src/index.ts
// // (if enabled, comment out the stdio "us-weather" entry above to avoid a duplicate key)
// "us-weather": {
// "url": "http://localhost:${SSE_SERVER_PORT}/sse"
// },
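// // To skip auto-detection and pin the protocol, set `"type"` explicitly,
// // as in the "github" example below (assumption: `"sse"` and `"ws"` are
// // accepted alongside `"http"`; the server name here is hypothetical):
// "pinned-sse-server": {
// "type": "sse",
// "url": "http://localhost:${SSE_SERVER_PORT}/sse"
// },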
// // Example of authentication via Authorization header
// // https://github.com/github/github-mcp-server?tab=readme-ov-file#remote-github-mcp-server
// "github": {
// // To avoid auto protocol fallback, specify the protocol explicitly when using authentication
// "type": "http", // or `transport: "http",`
// "url": "https://api.githubcopilot.com/mcp",
// "headers": {
// "Authorization": "Bearer ${GITHUB_PERSONAL_ACCESS_TOKEN}"
// }
// },
// // NOTE: When accessing the GitHub MCP server, a [GitHub PAT (Personal Access Token)](https://github.com/settings/personal-access-tokens)
// // alone is not enough; your GitHub account must have an active Copilot subscription or be assigned a Copilot license through your organization.
// // https://github.com/microsoft/playwright-mcp
// "playwright": {
// "command": "npx",
// "args": [ "-y", "@playwright/mcp@latest" ]
// },
// // https://github.com/modelcontextprotocol/servers/tree/main/src/brave-search
// "brave-search": {
// "command": "npx",
// "args": [ "-y", "@modelcontextprotocol/server-brave-search"],
// "env": { "BRAVE_API_KEY": "${BRAVE_API_KEY}" }
// },
// "notion": {
// "command": "npx",
// "args": ["-y", "@notionhq/notion-mcp-server"],
// "env": {
// // Although the following implies that this MCP server is designed for
// // OpenAI LLMs, it works fine with other models.
// // Tested with Claude and Gemini.
// "OPENAPI_MCP_HEADERS": '{"Authorization": "Bearer ${NOTION_INTEGRATION_SECRET}", "Notion-Version": "2022-06-28"}'
// },
// },
}
}