import type { Post } from "../types.js";
// Internal LLM helpers: search-query generation, batch quality checks, and nickname extraction.
// Uses Anthropic's Claude API by default and falls back to OpenAI when only
// OPENAI_API_KEY is set.
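/** Resolved provider and API key used by callLLM(). */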
interface LLMConfig {
apiKey: string;
provider: "anthropic" | "openai";
}
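/**
 * Resolve which provider to use from environment variables. Prefers
 * ANTHROPIC_API_KEY, falls back to OPENAI_API_KEY, and throws if neither is set.
 */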
function getLLMConfig(): LLMConfig {
const anthropicKey = process.env.ANTHROPIC_API_KEY;
const openaiKey = process.env.OPENAI_API_KEY;
if (anthropicKey) {
return { apiKey: anthropicKey, provider: "anthropic" };
}
if (openaiKey) {
return { apiKey: openaiKey, provider: "openai" };
}
throw new Error(
"Either ANTHROPIC_API_KEY or OPENAI_API_KEY environment variable is required"
);
}
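/**
 * Send a single-turn prompt to the configured provider and return the raw text
 * of the model's reply (empty string if the response contains no text).
 */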
async function callLLM(prompt: string): Promise<string> {
const config = getLLMConfig();
if (config.provider === "anthropic") {
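// Anthropic Messages API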
const response = await fetch("https://api.anthropic.com/v1/messages", {
method: "POST",
headers: {
"Content-Type": "application/json",
"x-api-key": config.apiKey,
"anthropic-version": "2023-06-01",
},
body: JSON.stringify({
model: "claude-3-5-haiku-20241022",
max_tokens: 1024,
messages: [{ role: "user", content: prompt }],
}),
});
if (!response.ok) {
const errorBody = await response.text();
throw new Error(`Anthropic API error (${response.status}): ${errorBody}`);
}
const data = (await response.json()) as {
content: Array<{ type: string; text: string }>;
};
// Plain text prompts return a single text block; select it defensively rather than assuming index 0.
return data.content.find((block) => block.type === "text")?.text || "";
} else {
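// OpenAI Chat Completions API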
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${config.apiKey}`,
},
body: JSON.stringify({
model: "gpt-4o-mini",
max_tokens: 1024,
messages: [{ role: "user", content: prompt }],
}),
});
if (!response.ok) {
const errorBody = await response.text();
throw new Error(`OpenAI API error (${response.status}): ${errorBody}`);
}
const data = (await response.json()) as {
choices: Array<{ message: { content: string } }>;
};
return data.choices[0]?.message?.content || "";
}
}
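/**
 * Generate 5-8 Twitter/X search queries likely to surface roasts, jokes, or
 * memes about the target. Falls back to a few generic queries if the model's
 * reply cannot be parsed as a JSON array.
 */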
export async function generateSearchQueries(target: string): Promise<string[]> {
const prompt = `You are helping find funny roasts, jokes, and memes about "${target}" on Twitter/X.
Generate 5-8 effective Twitter search queries that would find humorous content, roasts, or slander about this person/character. Consider:
- Common nicknames or mocking names for them
- Known failures, embarrassing moments, or running jokes
- Meme formats or hashtags associated with them
- Character-specific jokes (for fictional characters)
Return ONLY a JSON array of search query strings, nothing else. Example format:
["query 1", "query 2", "query 3"]`;
const response = await callLLM(prompt);
try {
// Extract JSON array from response
const jsonMatch = response.match(/\[[\s\S]*\]/);
if (jsonMatch) {
return JSON.parse(jsonMatch[0]) as string[];
}
throw new Error("No JSON array found in response");
} catch {
// Fallback to basic queries if parsing fails
return [
`${target} ratio`,
`${target} meme`,
`${target} roast`,
`${target} funny`,
];
}
}
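/**
 * Ask the LLM whether a batch of posts contains at least 3 genuinely funny
 * roasts, jokes, or memes about the target. Returns false for an empty batch.
 */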
export async function checkBatchQuality(
target: string,
posts: Post[]
): Promise<boolean> {
if (posts.length === 0) {
return false;
}
const postSummaries = posts
.slice(0, 20)
.map((p) => {
const text = p.text.length > 200 ? `${p.text.slice(0, 200)}...` : p.text;
return `- "${text}" (${p.likes} likes, ${p.retweets} RTs)`;
})
.join("\n");
const prompt = `You are evaluating a batch of Twitter posts found when searching for roasts/jokes about "${target}".
Here are the posts found:
${postSummaries}
Evaluate: Does this batch contain at least 3 posts that are actually funny roasts, jokes, or memes about ${target}?
Consider: relevance to target, humor value, engagement as social proof.
Respond with ONLY "yes" or "no".`;
const response = await callLLM(prompt);
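// Lenient match: the prompt asks for a bare "yes"/"no", but models sometimes add extra words.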
return response.toLowerCase().includes("yes");
}
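/**
 * Extract nicknames, mocking names, or slang terms for the target from a
 * sample of posts. Returns a de-duplicated list, or [] when none are found or
 * the model's reply cannot be parsed.
 */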
export async function extractNicknames(
target: string,
posts: Post[]
): Promise<string[]> {
if (posts.length === 0) {
return [];
}
const postTexts = posts
.slice(0, 30)
.map((p) => `- ${p.text}`)
.join("\n");
const prompt = `Analyze these Twitter posts about "${target}" and extract any nicknames, mocking names, or slang terms used to refer to them.
Posts:
${postTexts}
List ONLY the nicknames/slang terms found (not the original name). Return as a JSON array.
If no nicknames are found, return an empty array [].
Example format: ["LeChoke", "LeBum", "the king of excuses"]`;
const response = await callLLM(prompt);
try {
const jsonMatch = response.match(/\[[\s\S]*\]/);
if (jsonMatch) {
const nicknames = JSON.parse(jsonMatch[0]) as string[];
// Filter out empty strings and duplicates
return [...new Set(nicknames.filter((n) => n && n.trim()))];
}
return [];
} catch {
return [];
}
}
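// Example usage (illustrative sketch; `searchPosts` is a hypothetical helper
// that returns Post[] for a query and is not defined in this module):
//
//   const target = "LeBron James";
//   const queries = await generateSearchQueries(target);
//   const posts = await searchPosts(queries[0]); // hypothetical
//   if (await checkBatchQuality(target, posts)) {
//     const nicknames = await extractNicknames(target, posts);
//     console.log(`Nicknames for ${target}:`, nicknames);
//   }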