types.ts
// Providers supported by the cross-LLM server.
export type LLMProvider =
  | "chatgpt"
  | "claude"
  | "deepseek"
  | "gemini"
  | "grok"
  | "kimi"
  | "perplexity"
  | "mistral";

// Normalized response envelope shared by every provider.
export type LLMResponse = {
  provider: LLMProvider;
  response: string;
  model?: string;
  usage?: {
    prompt_tokens?: number;
    completion_tokens?: number;
    total_tokens?: number;
  };
  error?: string;
};

// Provider-agnostic request.
export type LLMRequest = {
  prompt: string;
  model?: string;
  temperature?: number;
  max_tokens?: number;
};

// ChatGPT (OpenAI chat-completions shape).
export type ChatGPTMessage = {
  role: "system" | "user" | "assistant";
  content: string;
};

export type ChatGPTRequest = {
  model: string;
  messages: ChatGPTMessage[];
  temperature?: number;
  max_tokens?: number;
};

export type ChatGPTResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

// Claude: content blocks plus input/output token usage.
export type ClaudeRequest = {
  model: string;
  max_tokens: number;
  messages: Array<{
    role: "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
};

export type ClaudeResponse = {
  id: string;
  type: string;
  role: string;
  content: Array<{
    type: string;
    text: string;
  }>;
  model: string;
  stop_reason: string;
  stop_sequence: string | null;
  usage: {
    input_tokens: number;
    output_tokens: number;
  };
};

// DeepSeek (OpenAI-compatible shape).
export type DeepSeekRequest = {
  model: string;
  messages: Array<{
    role: "system" | "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
  max_tokens?: number;
};

export type DeepSeekResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

// Gemini: contents/parts request, usageMetadata in the response.
export type GeminiRequest = {
  contents: Array<{
    parts: Array<{
      text: string;
    }>;
  }>;
  generationConfig?: {
    temperature?: number;
    maxOutputTokens?: number;
  };
};

export type GeminiResponse = {
  candidates: Array<{
    content: {
      parts: Array<{
        text: string;
      }>;
    };
    finishReason: string;
  }>;
  usageMetadata: {
    promptTokenCount: number;
    candidatesTokenCount: number;
    totalTokenCount: number;
  };
};

// Grok (OpenAI-compatible shape).
export type GrokRequest = {
  model: string;
  messages: Array<{
    role: "system" | "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
  max_tokens?: number;
};

export type GrokResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

// Kimi (OpenAI-compatible shape).
export type KimiRequest = {
  model: string;
  messages: Array<{
    role: "system" | "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
  max_tokens?: number;
};

export type KimiResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

// Perplexity (OpenAI-compatible shape).
export type PerplexityRequest = {
  model: string;
  messages: Array<{
    role: "system" | "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
  max_tokens?: number;
};

export type PerplexityResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};

// Mistral (OpenAI-compatible shape).
export type MistralRequest = {
  model: string;
  messages: Array<{
    role: "system" | "user" | "assistant";
    content: string;
  }>;
  temperature?: number;
  max_tokens?: number;
};

export type MistralResponse = {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
};
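These types exist so that every provider call can be translated from the provider-agnostic LLMRequest and normalized back into the same LLMResponse envelope. Below is a minimal sketch of what such adapters could look like; the "./types" import path and all helper names (toOpenAIStyleRequest, toGeminiRequest, fromOpenAIStyleResponse, fromClaudeResponse) are hypothetical and not part of the original file:

import type {
  ChatGPTRequest,
  ChatGPTResponse,
  ClaudeResponse,
  GeminiRequest,
  LLMProvider,
  LLMRequest,
  LLMResponse,
} from "./types";

// Build an OpenAI-style chat-completion request from the
// provider-agnostic LLMRequest; the prompt becomes a single
// user message.
export function toOpenAIStyleRequest(
  req: LLMRequest,
  defaultModel: string,
): ChatGPTRequest {
  return {
    model: req.model ?? defaultModel,
    messages: [{ role: "user", content: req.prompt }],
    temperature: req.temperature,
    max_tokens: req.max_tokens,
  };
}

// Gemini nests the prompt under contents/parts and puts sampling
// options in generationConfig.
export function toGeminiRequest(req: LLMRequest): GeminiRequest {
  return {
    contents: [{ parts: [{ text: req.prompt }] }],
    generationConfig: {
      temperature: req.temperature,
      maxOutputTokens: req.max_tokens,
    },
  };
}

// Collapse an OpenAI-style response into the normalized envelope.
export function fromOpenAIStyleResponse(
  provider: LLMProvider,
  res: ChatGPTResponse,
): LLMResponse {
  return {
    provider,
    response: res.choices[0]?.message.content ?? "",
    model: res.model,
    usage: res.usage,
  };
}

// Claude returns content blocks and input/output token counts, so it
// needs its own mapping into the same envelope.
export function fromClaudeResponse(res: ClaudeResponse): LLMResponse {
  return {
    provider: "claude",
    response: res.content.map((block) => block.text).join(""),
    model: res.model,
    usage: {
      prompt_tokens: res.usage.input_tokens,
      completion_tokens: res.usage.output_tokens,
      total_tokens: res.usage.input_tokens + res.usage.output_tokens,
    },
  };
}

Because DeepSeek, Grok, Kimi, Perplexity, and Mistral declare the same structural shape as ChatGPT, a single OpenAI-style adapter pair covers six of the eight providers; only Claude and Gemini need dedicated mappings.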

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/JamesANZ/cross-llm-mcp'
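
The same endpoint can be queried from code. Here is a minimal sketch in TypeScript, assuming an ES-module context on a runtime with built-in fetch (Node 18+); the response schema is not documented here, so the JSON is treated as opaque:

// Fetch this server's directory entry from the Glama MCP API.
const url = "https://glama.ai/api/mcp/v1/servers/JamesANZ/cross-llm-mcp";

const res = await fetch(url);
if (!res.ok) {
  throw new Error(`MCP directory API request failed: ${res.status}`);
}

// The schema is undocumented in this snippet, so keep it as unknown.
const server: unknown = await res.json();
console.log(JSON.stringify(server, null, 2));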

If you have feedback or need assistance with the MCP directory API, please join our Discord server.