#!/usr/bin/env node
/**
* MCP Server: Generate test cases from documentation with rules.
* Tool: generate_testcases — accepts document + rules, returns test cases (or formatted prompt).
*/
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import * as fs from "fs-extra";
import * as path from "path";
import { z } from "zod";
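// System prompt for the default Markdown output; CSV requests get a stricter prompt built inside the tool handler.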
const DEFAULT_SYSTEM_PROMPT = `You are a QA expert. Your task: generate a set of manual test cases based on the provided documentation and rules.
- Output: a clear list of test cases, each with: ID, Description, Preconditions, Steps, Expected result, Priority (if applicable).
- Format: Markdown or table, easy to read.
- Follow the given rules strictly.`;
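/** Combine the reference document and the rules into a single user prompt for the LLM. */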
function buildPrompt(documentContent: string, rules: string): string {
return `## Reference documentation
${documentContent}
---
## Rules to follow
${rules}
---
Generate a set of manual test cases that reasonably cover the requirements in the documentation and follow the rules above. Output in the same language as the documentation.`;
}
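// Server identity reported to MCP clients during the initialize handshake.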
const mcpServer = new McpServer({
name: "mcp-manual-testcases",
version: "1.0.0",
});
// Tool: generate_testcases — document + rules -> test cases (or formatted prompt)
mcpServer.registerTool(
"generate_testcases",
{
description:
"Generate manual test cases from document content and rules. You can pass document content directly (document_content) or a file path (document_path). If the client supports sampling (LLM), it will call the LLM to generate test cases; otherwise returns a formatted prompt for use with an external LLM.",
inputSchema: {
document_content: z
.string()
.optional()
.describe(
"Document content (text). Omit if using document_path."
),
document_path: z
.string()
.optional()
.describe(
"Path to the document file (txt, md, or PDF). Prefer this when available."
),
output_format: z
.enum(["markdown", "csv"])
.optional()
.default("markdown")
.describe(
"Output format: markdown (default) or csv. CSV uses columns: 模块,标题,前置条件,步骤描述,预期结果,test1测试人员,test1测试结果,buglink,PRE测试人员,PRE测试结果,buglink."
),
rules: z
.string()
.describe(
"Rules for generating test cases (e.g. format, priority, scope, language)."
),
use_llm: z
.boolean()
.optional()
.default(true)
.describe(
"True: if client supports sampling, call LLM to generate test cases. False: only return the formatted prompt."
),
max_tokens: z
.number()
.optional()
.default(4096)
.describe("Max tokens for the LLM response (when use_llm=true)."),
},
},
async ({
document_content,
document_path,
rules,
use_llm,
max_tokens,
output_format,
}) => {
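// Resolve the document text: a provided document_path overrides inline document_content.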
let documentContent = document_content ?? "";
if (document_path) {
try {
const resolved = path.isAbsolute(document_path)
? document_path
: path.resolve(process.cwd(), document_path);
const ext = path.extname(resolved).toLowerCase();
if (ext === ".pdf") {
const mod = await import("pdf-parse");
const data = await fs.readFile(resolved);
const parser = new mod.PDFParse({ data });
try {
const result = await parser.getText();
documentContent = result?.text ?? "";
} finally {
await parser.destroy();
}
} else {
documentContent = await fs.readFile(resolved, "utf-8");
}
} catch (err) {
const message =
err instanceof Error ? err.message : String(err);
return {
content: [
{ type: "text" as const, text: `Error reading file: ${message}` },
],
isError: true,
};
}
}
if (!documentContent.trim()) {
return {
content: [
{
type: "text" as const,
text:
"Please provide document_content or document_path to a document file with content.",
},
],
isError: true,
};
}
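// CSV mode swaps in a stricter system prompt that pins the exact header row and quoting rules.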
const outputCsv = output_format === "csv";
const csvSystemPrompt = outputCsv
? `You are a QA expert. Generate manual test cases from the documentation and rules.
Output ONLY valid CSV with this exact header (no extra text, no markdown):
模块,标题,前置条件,步骤描述,预期结果,test1测试人员,test1测试结果,buglink,PRE测试人员,PRE测试结果,buglink
- Use comma as separator. If a cell contains comma or newline or double quote, wrap the cell in double quotes and escape " as "".
- 模块: Chinese module path. 标题, 前置条件, 步骤描述, 预期结果: English. Leave the execution columns (测试人员, 测试结果, buglink) empty.`
: null;
const systemPrompt = csvSystemPrompt ?? DEFAULT_SYSTEM_PROMPT;
const prompt = buildPrompt(documentContent, rules);
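// Prompt-only mode: hand back the system and user prompts so the caller can run them through an external LLM.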
if (!use_llm) {
const out =
`## Formatted prompt (for use with LLM)\n\n### System prompt (suggested)\n${systemPrompt}\n\n### User prompt\n${prompt}`;
return {
content: [{ type: "text" as const, text: out }],
};
}
// Call LLM via client (sampling) if available
try {
// Minimal structural type for the sampling API on the underlying Server,
// so we avoid depending on extra SDK type exports here.
type SamplingServer = {
createMessage?: (params: {
messages: Array<{
role: string;
content: { type: string; text: string };
}>;
maxTokens?: number;
systemPrompt?: string;
}) => Promise<{ content: { type: string; text?: string } }>;
};
const server = mcpServer.server as SamplingServer;
if (typeof server.createMessage === "function") {
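// createMessage issues a sampling/createMessage request to the client; it rejects if the client
// has not advertised the sampling capability, which lands in the catch below.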
const response = await server.createMessage({
systemPrompt,
messages: [
{
role: "user",
content: { type: "text", text: prompt },
},
],
maxTokens: max_tokens,
});
const text: string =
response?.content?.type === "text" && response.content.text != null
? response.content.text
: "No content received from LLM.";
return {
content: [{ type: "text" as const, text }],
};
}
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
return {
content: [
{
type: "text" as const,
text: `Sampling request failed (the client may not support sampling): ${msg}. Call the tool with use_llm: false to get the formatted prompt instead.`,
},
],
isError: true,
};
}
// Fallback: return formatted prompt
const fallbackText = `Client does not support sampling (LLM). Use the prompt below with an external LLM:\n\n### System\n${systemPrompt}\n\n### User\n${prompt}`;
return {
content: [{ type: "text" as const, text: fallbackText }],
};
}
);
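// Run the server over stdio so it can be spawned directly as a child process by an MCP client.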
async function main() {
const transport = new StdioServerTransport();
await mcpServer.connect(transport);
}
main().catch((err) => {
console.error("MCP server error:", err);
process.exit(1);
});