Skip to main content
Glama
by Microsoft
chatcache.ts1.24 kB
import { createCache } from "./cache" import type { ChatCompletionResponse, CreateChatCompletionRequest, } from "./chattypes" import { CHAT_CACHE } from "./constants" import type { LanguageModelConfiguration } from "./server/messages" // Define the type for a cache key, which combines chat completion request // with additional model options, excluding "token" and "source" from the language model configuration. export type ChatCompletionRequestCacheKey = CreateChatCompletionRequest & Omit<LanguageModelConfiguration, "token" | "source"> // Define a JSON line cache type that maps cache keys to cache values. // This cache stores chat completion requests and their associated responses. export type ChatCompletationRequestCache = WorkspaceFileCache< ChatCompletionRequestCacheKey, ChatCompletionResponse > // Function to retrieve a chat completion cache. // It uses a default cache name if none is provided. // This function ensures consistent access to cached chat completions. export function getChatCompletionCache( name?: string ): ChatCompletationRequestCache { return createCache<ChatCompletionRequestCacheKey, ChatCompletionResponse>( name || CHAT_CACHE, { type: "fs" } ) }

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/microsoft/genaiscript'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.