// llm.ts
/**
* LLM Client - Unified interface for calling different LLM providers
*/
import { PROVIDERS, getApiKey } from "./providers.js";
/** A single chat message in the provider-agnostic conversation format. */
export interface Message {
// Who authored the message; "system" messages are split out for providers
// (Anthropic, Google) that accept the system prompt as a separate field.
role: "system" | "user" | "assistant";
// Plain-text message body.
content: string;
}
/** Normalized result returned by every provider call in this module. */
export interface LLMResponse {
// Assistant reply text ("" when the provider returned no text content).
content: string;
// Model identifier as reported by the provider's response (may differ
// from the requested model, e.g. a resolved version string).
model: string;
// Name of the provider that served the request.
provider: string;
}
/** Minimal shape of an OpenAI-compatible /chat/completions response (only the fields read here). */
interface OpenAIResponse {
choices: Array<{
message: {
content: string;
};
}>;
model: string;
}
/** Minimal shape of an Anthropic Messages API response (only the fields read here). */
interface AnthropicResponse {
// Content blocks; the first block with type === "text" supplies the reply.
content: Array<{
type: string;
text: string;
}>;
model: string;
}
/** Minimal shape of a Google generateContent response (only the fields read here). */
interface GoogleResponse {
candidates: Array<{
content: {
parts: Array<{
text: string;
}>;
};
}>;
// Resolved model version, e.g. the concrete revision actually used.
modelVersion: string;
}
/**
* Call an OpenAI-compatible API
*/
/**
 * POST a chat-completion request to any OpenAI-compatible endpoint.
 *
 * @param baseUrl - API root (no trailing slash); "/chat/completions" is appended.
 * @param apiKey - Sent as a Bearer token in the Authorization header.
 * @param model - Model identifier to request.
 * @param messages - Conversation, passed through unchanged.
 * @param temperature - Sampling temperature forwarded to the API.
 * @param provider - Display name used in error messages and the result.
 * @param extraHeaders - Additional HTTP headers merged over the defaults.
 * @param extraBody - Additional JSON body fields merged over the defaults.
 * @returns Normalized response with the first choice's text.
 * @throws Error when the HTTP response is not ok, including status and body text.
 */
async function callOpenAICompatible(
  baseUrl: string,
  apiKey: string,
  model: string,
  messages: Message[],
  temperature: number,
  provider: string,
  extraHeaders: Record<string, string> = {},
  extraBody: Record<string, unknown> = {}
): Promise<LLMResponse> {
  const url = `${baseUrl}/chat/completions`;
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`,
    ...extraHeaders,
  };
  // extraBody is spread last so callers can override any default field.
  const payload = { model, messages, temperature, ...extraBody };

  const response = await fetch(url, {
    method: "POST",
    headers,
    body: JSON.stringify(payload),
  });
  if (!response.ok) {
    const detail = await response.text();
    throw new Error(`${provider} API error (${response.status}): ${detail}`);
  }

  const data = (await response.json()) as OpenAIResponse;
  // Fall back to "" when the API returns no choices or empty content.
  const content = data.choices[0]?.message?.content ?? "";
  return { content, model: data.model, provider };
}
/**
* Call Anthropic's Messages API
*/
/**
 * Send a conversation to Anthropic's Messages API.
 *
 * The Messages API takes the system prompt as a top-level `system` field
 * rather than as a message, so the first system message (if any) is split
 * out of the conversation before sending.
 *
 * @param apiKey - Value for the x-api-key header.
 * @param model - Anthropic model identifier.
 * @param messages - Conversation in the provider-agnostic format.
 * @param temperature - Sampling temperature forwarded to the API.
 * @returns Normalized response containing the first text content block.
 * @throws Error when the HTTP response is not ok, including status and body text.
 */
async function callAnthropic(
  apiKey: string,
  model: string,
  messages: Message[],
  temperature: number
): Promise<LLMResponse> {
  const system = messages.find((m) => m.role === "system")?.content || "";
  const conversation = messages
    .filter((m) => m.role !== "system")
    .map(({ role, content }) => ({ role, content }));

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify({
      model,
      max_tokens: 8192,
      temperature,
      system,
      messages: conversation,
    }),
  });
  if (!response.ok) {
    const detail = await response.text();
    throw new Error(`Anthropic API error (${response.status}): ${detail}`);
  }

  const data = (await response.json()) as AnthropicResponse;
  // The reply arrives as a list of content blocks; take the first text block.
  const text = data.content.find((c) => c.type === "text")?.text || "";
  return { content: text, model: data.model, provider: "anthropic" };
}
/**
* Call Google's Generative AI API
*/
/**
 * Send a conversation to Google's Generative Language API (generateContent).
 *
 * Google's format differs from the OpenAI shape: the system message becomes
 * a `systemInstruction`, and each remaining message becomes a `contents`
 * entry whose role is "model" for assistant turns and "user" otherwise.
 *
 * @param apiKey - API key appended as the `key` query parameter.
 * @param model - Google model identifier (path segment of the endpoint URL).
 * @param messages - Conversation in the provider-agnostic format.
 * @param temperature - Sampling temperature, passed via generationConfig.
 * @returns Normalized response with the first candidate's first text part.
 * @throws Error when the HTTP response is not ok, including status and body text.
 */
async function callGoogle(
  apiKey: string,
  model: string,
  messages: Message[],
  temperature: number
): Promise<LLMResponse> {
  const system = messages.find((m) => m.role === "system");
  const contents = messages
    .filter((m) => m.role !== "system")
    .map((m) => ({
      role: m.role === "assistant" ? "model" : "user",
      parts: [{ text: m.content }],
    }));

  const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`;
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      contents,
      // Omit systemInstruction entirely when no system message was given
      // (undefined fields are dropped by JSON.stringify).
      systemInstruction: system
        ? { parts: [{ text: system.content }] }
        : undefined,
      generationConfig: {
        temperature,
      },
    }),
  });
  if (!response.ok) {
    const detail = await response.text();
    throw new Error(`Google API error (${response.status}): ${detail}`);
  }

  const data = (await response.json()) as GoogleResponse;
  return {
    content: data.candidates?.[0]?.content?.parts?.[0]?.text || "",
    // Prefer the resolved version reported by the API; fall back to the request.
    model: data.modelVersion || model,
    provider: "google",
  };
}
/**
* Main function to call any supported LLM
*/
/**
 * Call any supported LLM provider with a unified interface.
 *
 * Dispatches to the provider-specific client: Anthropic and Google use
 * bespoke APIs; everything else goes through the OpenAI-compatible path,
 * with OpenRouter getting extra routing options.
 *
 * @param provider - Key into the PROVIDERS registry (e.g. "anthropic").
 * @param model - Model identifier understood by that provider.
 * @param messages - Conversation in the provider-agnostic format.
 * @param temperature - Sampling temperature (default 0.7).
 * @returns Normalized response from whichever provider handled the call.
 * @throws Error when the provider key is unknown, or on any API failure.
 */
export async function callLLM(
  provider: string,
  model: string,
  messages: Message[],
  temperature: number = 0.7
): Promise<LLMResponse> {
  const config = PROVIDERS[provider];
  if (!config) {
    throw new Error(`Unknown provider: ${provider}`);
  }
  const apiKey = getApiKey(provider);

  switch (provider) {
    // Providers with non-OpenAI-compatible APIs.
    case "anthropic":
      return callAnthropic(apiKey, model, messages, temperature);
    case "google":
      return callGoogle(apiKey, model, messages, temperature);
    // OpenRouter: prefer Fireworks-hosted models, allowing fallbacks.
    case "openrouter":
      return callOpenAICompatible(
        config.baseUrl,
        apiKey,
        model,
        messages,
        temperature,
        config.name,
        {},
        {
          provider: {
            order: ["Fireworks"],
            allow_fallbacks: true,
          },
        }
      );
    // Every remaining provider speaks the OpenAI chat-completions protocol.
    default:
      return callOpenAICompatible(
        config.baseUrl,
        apiKey,
        model,
        messages,
        temperature,
        config.name
      );
  }
}