optimize_prompt
Analyze and improve LLM prompts by scoring clarity, specificity, structure, and completeness. Returns optimized rewrites with explanations of changes for better AI responses.
Instructions
Analyze and improve an LLM prompt. Scores clarity, specificity, structure, and completeness. Returns an optimized rewrite with a summary of what changed and why. Powered by Claude.
Input Schema
Table / JSON Schema
| Name | Required | Description | Default |
|---|---|---|---|
| prompt | Yes | The LLM prompt to analyze and/or improve | |
| model | No | Target model (e.g. 'gpt-4o', 'claude-3-5-sonnet') | gpt-4o |
| task | No | What this prompt is trying to accomplish | |
| mode | No | 'both' returns analysis + improved prompt; 'analyze' scores only; 'improve' rewrites only | both |
Implementation Reference
- mcp-server/src/index.ts:664-723 (handler) — The "optimize_prompt" tool registration and handler implementation. It uses the helper function 'callToolApi' to communicate with an external Agent Toolbelt API.
// Register the "optimize_prompt" MCP tool.
//
// Delegates the actual analysis to the external Agent Toolbelt
// "prompt-optimizer" API via `callToolApi`, then renders the structured
// response (scores, issues, suggestions, improved prompt, change summary,
// token stats) as a single markdown-formatted text content block.
server.registerTool(
  "optimize_prompt",
  {
    title: "Prompt Optimizer",
    description:
      "Analyze and improve an LLM prompt. Scores clarity, specificity, structure, and completeness. " +
      "Returns an optimized rewrite with a summary of what changed and why. Powered by Claude.",
    inputSchema: {
      prompt: z.string().describe("The LLM prompt to analyze and/or improve"),
      model: z.string().default("gpt-4o").describe("Target model (e.g. 'gpt-4o', 'claude-3-5-sonnet')"),
      task: z.string().optional().describe("What this prompt is trying to accomplish"),
      mode: z
        .enum(["improve", "analyze", "both"])
        .default("both")
        .describe("'both' returns analysis + improved prompt; 'analyze' scores only; 'improve' rewrites only"),
    },
  },
  async ({ prompt, model, task, mode }) => {
    const result = await callToolApi("prompt-optimizer", { prompt, model, task, mode });
    // NOTE(review): the API response is untyped here; narrow through one
    // localized cast instead of the previous `as any` double hop. Ideally the
    // response would be validated with a zod schema — TODO confirm shape.
    const r = (result as { result: any }).result;

    const lines: string[] = [`**Prompt Optimizer** (targeting: ${r.model})`];
    if (r.scores) {
      lines.push(
        "",
        "**Scores:**",
        `  Clarity: ${r.scores.clarity}/10`,
        `  Specificity: ${r.scores.specificity}/10`,
        `  Structure: ${r.scores.structure}/10`,
        `  Completeness: ${r.scores.completeness}/10`,
        `  Overall: ${r.scores.overall}/10`
      );
    }
    if (r.issues?.length) {
      lines.push("", "**Issues found:**", ...r.issues.map((i: string) => `  - ${i}`));
    }
    if (r.suggestions?.length) {
      lines.push("", "**Suggestions:**", ...r.suggestions.map((s: string) => `  - ${s}`));
    }
    if (r.improvedPrompt) {
      lines.push("", "**Improved prompt:**", "```", r.improvedPrompt, "```");
    }
    if (r.changesSummary?.length) {
      lines.push("", "**Changes made:**", ...r.changesSummary.map((c: string) => `  - ${c}`));
    }
    // Guard tokenStats like every other optional section: previously an absent
    // tokenStats threw a TypeError and discarded all formatted output above.
    if (r.tokenStats) {
      lines.push(
        "",
        `**Token stats:** original: ${r.tokenStats.original}${
          r.tokenStats.improved
            ? ` → improved: ${r.tokenStats.improved} (${r.tokenStats.delta > 0 ? "+" : ""}${r.tokenStats.delta})`
            : ""
        }`
      );
    }
    return { content: [{ type: "text" as const, text: lines.join("\n") }] };
  }
);