// Validate Prompt Tool - LLM-powered validation
import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import type { ValidationIssue, ValidationResponse } from '../config/types.js';
import {
createErrorResponse,
createSuccessResponse,
ErrorCode,
} from '../lib/errors.js';
import { executeLLMWithJsonResponse } from '../lib/tool-helpers.js';
import { validatePrompt } from '../lib/validation.js';
import {
ValidatePromptInputSchema,
ValidatePromptOutputSchema,
} from '../schemas/index.js';
import { ValidationResponseSchema } from '../schemas/llm-responses.js';
const VALIDATION_SYSTEM_PROMPT = `You are an expert prompt validator. Check prompts for issues, estimate tokens, and detect security risks.
<validation_checks>
1. **Anti-patterns**: Vague language, missing context, overly long sentences
2. **Token limits**: Estimate tokens (1 token ≈ 4 chars), check against model limits
3. **Security**: Prompt injection patterns, script injection
4. **Typos**: Common misspellings
5. **Quality**: Role definition, output format, examples
</validation_checks>
<model_limits>
- claude: 200,000 tokens
- gpt: 128,000 tokens
- gemini: 1,000,000 tokens
- generic: 8,000 tokens
</model_limits>
<output_format>
Return JSON:
{
"isValid": boolean,
"tokenEstimate": number,
"issues": [
{
"type": "error" | "warning" | "info",
"message": string,
"suggestion": string
}
]
}
</output_format>`;
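// NOTE: the <output_format> contract in the prompt above must stay in sync
// with ValidationResponseSchema (../schemas/llm-responses.js), which parses
// the LLM's reply.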
/** Formats a validation issue as a markdown bullet, with its suggestion when present. */
function formatIssue(issue: ValidationIssue): string {
  return issue.suggestion
    ? `- ${issue.message}\n  💡 ${issue.suggestion}`
    : `- ${issue.message}`;
}
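// Illustrative output (hypothetical issue object):
//   formatIssue({ type: 'warning', message: 'No output format specified',
//     suggestion: 'Describe the expected response shape' })
//   → '- No output format specified\n  💡 Describe the expected response shape'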
/** Formats the parsed validation response as a markdown report. */
function formatValidationOutput(
parsed: ValidationResponse,
targetModel: string
): string {
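  // Bucket issues by severity so the report renders one section per level.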
const errors = parsed.issues.filter((i) => i.type === 'error');
const warnings = parsed.issues.filter((i) => i.type === 'warning');
const infos = parsed.issues.filter((i) => i.type === 'info');
const sections = [
`# Prompt Validation`,
``,
`**Status**: ${parsed.isValid ? '✅ Valid' : '❌ Invalid'}`,
`**Token Estimate**: ~${parsed.tokenEstimate} tokens`,
`**Target Model**: ${targetModel}`,
``,
];
if (errors.length > 0) {
sections.push(
`## ❌ Errors (${errors.length})`,
errors.map(formatIssue).join('\n'),
``
);
}
if (warnings.length > 0) {
sections.push(
`## ⚠️ Warnings (${warnings.length})`,
warnings.map(formatIssue).join('\n'),
``
);
}
if (infos.length > 0) {
sections.push(
`## ℹ️ Info (${infos.length})`,
infos.map(formatIssue).join('\n'),
``
);
}
  sections.push(
    parsed.isValid
      ? '✅ Prompt is ready to use!'
      : '❌ Fix errors before using this prompt.'
  );
return sections.join('\n');
}
/**
 * Registers the validate_prompt tool with the MCP server.
 * Uses an external LLM for validation.
 */
export function registerValidatePromptTool(server: McpServer): void {
server.registerTool(
'validate_prompt',
{
title: 'Validate Prompt',
      description:
        'Pre-flight validation using AI: checks for issues, estimates tokens, and detects security risks. Returns an isValid boolean and categorized issues.',
inputSchema: ValidatePromptInputSchema,
outputSchema: ValidatePromptOutputSchema,
annotations: {
readOnlyHint: true,
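        // Not idempotent: the underlying LLM can return different results
        // for the same input.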
idempotentHint: false,
openWorldHint: false,
},
},
async ({
prompt,
targetModel = 'generic',
checkInjection = true,
}: {
prompt: string;
targetModel?: string;
checkInjection?: boolean;
}) => {
try {
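        // Local sanity check first (assumed to throw on malformed input,
        // handled by the catch below) so no LLM tokens are spent on bad input.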
const validatedPrompt = validatePrompt(prompt);
const validationPrompt = `${VALIDATION_SYSTEM_PROMPT}\n\nTarget Model: ${targetModel}\nCheck Injection: ${String(checkInjection)}\n\nPROMPT TO VALIDATE:\n${validatedPrompt}`;
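        // Ask the LLM to run the checks, then validate its JSON reply
        // against ValidationResponseSchema so malformed responses fail fast.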
const parsed = await executeLLMWithJsonResponse<ValidationResponse>(
validationPrompt,
(value) => ValidationResponseSchema.parse(value),
ErrorCode.E_LLM_FAILED,
'validate_prompt',
{ maxTokens: 1000 }
);
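        // Only surface a security flag when the caller opted into injection
        // checks and the LLM reported an injection-related error.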
const errors = parsed.issues.filter((i) => i.type === 'error');
        const hasInjectionIssue =
          checkInjection &&
          errors.some((e) => e.message.toLowerCase().includes('injection'));
const output = formatValidationOutput(parsed, targetModel);
return createSuccessResponse(output, {
ok: true,
isValid: parsed.isValid,
issues: parsed.issues,
tokenEstimate: parsed.tokenEstimate,
securityFlags: hasInjectionIssue ? ['injection_detected'] : [],
});
} catch (error) {
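        // E_INVALID_INPUT is the fallback code here; errors thrown by
        // executeLLMWithJsonResponse are assumed to already carry
        // ErrorCode.E_LLM_FAILED.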
return createErrorResponse(error, ErrorCode.E_INVALID_INPUT, prompt);
}
}
);
}
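
// Usage sketch (illustrative; the server name/version and stdio transport
// are assumptions, not part of this module):
//
//   import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
//   import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
//
//   const server = new McpServer({ name: 'prompt-tools', version: '1.0.0' });
//   registerValidatePromptTool(server);
//   await server.connect(new StdioServerTransport());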