
Git MCP Server

ILlmProvider.ts
/**
 * @fileoverview Defines the interface for a generic Large Language Model (LLM) provider.
 * This contract ensures that any LLM service implementation can be used interchangeably.
 * @module src/services/llm-providers/ILlmProvider
 */
import type {
  ChatCompletion,
  ChatCompletionChunk,
  ChatCompletionCreateParamsNonStreaming,
  ChatCompletionCreateParamsStreaming,
} from 'openai/resources/chat/completions';
import type { Stream } from 'openai/streaming';

import type { RequestContext } from '@/utils/index.js';

export type OpenRouterChatParams =
  | ChatCompletionCreateParamsNonStreaming
  | ChatCompletionCreateParamsStreaming;

export interface ILlmProvider {
  /**
   * Creates a chat completion. Can be streaming or non-streaming.
   * @param params - The parameters for the chat completion request.
   * @param context - The request context for logging and tracing.
   * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
   */
  chatCompletion(
    params: OpenRouterChatParams, // We can generalize this type later if needed
    context: RequestContext,
  ): Promise<ChatCompletion | Stream<ChatCompletionChunk>>;

  /**
   * Creates a streaming chat completion.
   * @param params - The parameters for the chat completion request.
   * @param context - The request context for logging and tracing.
   * @returns A promise that resolves to an async iterable of ChatCompletionChunks.
   */
  chatCompletionStream(
    params: OpenRouterChatParams,
    context: RequestContext,
  ): Promise<AsyncIterable<ChatCompletionChunk>>;
}
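For illustration, below is a minimal sketch of a class that could satisfy this contract by wrapping the official `openai` SDK pointed at OpenRouter's OpenAI-compatible endpoint. It is not the repository's actual provider implementation: the class name `OpenRouterProvider`, the `OPENROUTER_API_KEY` environment variable, and the import path for the interface are all assumptions made for the example.

// Illustrative sketch only — not the repository's actual provider.
// Assumes the official `openai` SDK and the ILlmProvider interface above.
import OpenAI from 'openai';
import type {
  ChatCompletion,
  ChatCompletionChunk,
  ChatCompletionCreateParamsStreaming,
} from 'openai/resources/chat/completions';
import type { Stream } from 'openai/streaming';

import type { RequestContext } from '@/utils/index.js';
import type { ILlmProvider, OpenRouterChatParams } from './ILlmProvider.js';

export class OpenRouterProvider implements ILlmProvider {
  // OpenRouter exposes an OpenAI-compatible API, so the stock SDK client
  // works once baseURL points at it. The env var name is an assumption.
  private readonly client = new OpenAI({
    baseURL: 'https://openrouter.ai/api/v1',
    apiKey: process.env.OPENROUTER_API_KEY,
  });

  async chatCompletion(
    params: OpenRouterChatParams,
    context: RequestContext, // accepted for logging/tracing; unused in this sketch
  ): Promise<ChatCompletion | Stream<ChatCompletionChunk>> {
    // The SDK returns a ChatCompletion or a Stream depending on params.stream.
    return this.client.chat.completions.create(params);
  }

  async chatCompletionStream(
    params: OpenRouterChatParams,
    context: RequestContext,
  ): Promise<AsyncIterable<ChatCompletionChunk>> {
    // Force streaming; Stream<ChatCompletionChunk> is itself async-iterable.
    const streamingParams: ChatCompletionCreateParamsStreaming = {
      ...params,
      stream: true,
    };
    return this.client.chat.completions.create(streamingParams);
  }
}

One plausible reading of the interface's design: `chatCompletion` returns the union type so callers can toggle streaming through `params.stream` alone, while `chatCompletionStream` offers a dedicated path whose return type is already narrowed to an async iterable of chunks.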
