/**
* MCP Server Configuration
* Sets up the Model Context Protocol server with tools and resources
*/
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ListToolsRequestSchema,
ListResourcesRequestSchema,
ReadResourceRequestSchema,
ErrorCode,
McpError,
} from '@modelcontextprotocol/sdk/types.js';
import {
generatePrompt,
generatePromptSchema,
refinePrompt,
refinePromptSchema,
analyzePrompt,
analyzePromptSchema,
} from './tools/index.js';
import {
getAllTemplates,
getTemplateById,
getCategories,
} from './resources/templates.js';
import { logger, getStats, isApiClientAvailable } from './utils/index.js';
/**
* Create and configure the MCP server
*/
export function createServer(): Server {
const server = new Server(
{
name: 'promptarchitect-mcp',
version: '1.0.0',
},
{
capabilities: {
tools: {},
resources: {},
},
}
);
// API client is auto-initialized - no API key needed from user
logger.info('PromptArchitect API client ready');
// Register tool handlers
registerToolHandlers(server);
// Register resource handlers
registerResourceHandlers(server);
// Error handling
server.onerror = (error) => {
logger.error('MCP Server Error', { error });
};
return server;
}
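/**
 * Register handlers for listing tools and dispatching tool calls.
 * Exposes: generate_prompt, refine_prompt, analyze_prompt, get_server_status.
 */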
function registerToolHandlers(server: Server): void {
// List available tools
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: 'generate_prompt',
description: `Transform a raw idea into a well-structured, actionable prompt optimized for AI assistants.
Use this tool when you need to:
• Create a new prompt from scratch
• Structure a vague idea into a clear request
• Generate role-specific prompts (coding, writing, research, etc.)
Supports templates: coding (for programming tasks), writing (for content creation), research (for investigation), analysis (for data/business analysis), factcheck (for verification), general (versatile).
IMPORTANT: When available, pass workspace context (file structure, package.json, tech stack) to generate prompts that align with the user's project.`,
inputSchema: {
type: 'object',
properties: {
idea: {
type: 'string',
description: 'The raw idea or concept to transform into a prompt. Can be brief or detailed.',
},
template: {
type: 'string',
enum: ['coding', 'writing', 'research', 'analysis', 'factcheck', 'general'],
description: 'Template type to use. Default: auto-detected from idea or "general".',
},
context: {
type: 'string',
description: 'Additional context like domain, constraints, or preferences.',
},
targetModel: {
type: 'string',
enum: ['gpt-4', 'claude', 'gemini', 'general'],
description: 'Target AI model for optimization. Default: "general".',
},
workspaceContext: {
type: 'string',
description: 'Project context to ensure the prompt aligns with the codebase. Include: file/folder structure, package.json dependencies, tech stack (React, Node, etc.), relevant code snippets, and the original user request. This helps generate prompts that comply with project conventions.',
},
},
required: ['idea'],
},
},
{
name: 'refine_prompt',
description: `Iteratively improve an existing prompt based on specific feedback.
Use this tool when you need to:
• Improve a prompt that didn't get good results
• Add missing context or constraints
• Make a prompt more specific or clearer
• Adapt a prompt for a different AI model
By default, the tool preserves the original structure while applying targeted improvements.
IMPORTANT: When available, pass workspace context (file structure, package.json, tech stack) to ensure refined prompts comply with the user's project scope and original request.`,
inputSchema: {
type: 'object',
properties: {
prompt: {
type: 'string',
description: 'The current prompt to refine.',
},
feedback: {
type: 'string',
description: 'What should be improved. Examples: "make it more specific", "add error handling requirements", "focus on performance".',
},
preserveStructure: {
type: 'boolean',
description: 'Whether to keep the original structure. Default: true.',
},
targetModel: {
type: 'string',
enum: ['gpt-4', 'claude', 'gemini', 'general'],
description: 'Target AI model for optimization.',
},
workspaceContext: {
type: 'string',
description: 'Project context to ensure the refined prompt aligns with the codebase. Include: file/folder structure, package.json dependencies, tech stack (React, Node, etc.), relevant code snippets, and the original user request. This ensures the refined prompt complies with project conventions and scope.',
},
},
required: ['prompt', 'feedback'],
},
},
{
name: 'analyze_prompt',
description: `Evaluate prompt quality and get actionable improvement suggestions.
Use this tool when you need to:
• Assess if a prompt is well-structured
• Identify weaknesses before using a prompt
• Get specific suggestions for improvement
• Compare prompt quality before/after refinement
Returns scores (0-100) for: clarity, specificity, structure, actionability.`,
inputSchema: {
type: 'object',
properties: {
prompt: {
type: 'string',
description: 'The prompt to analyze.',
},
evaluationCriteria: {
type: 'array',
items: { type: 'string' },
description: 'Specific criteria to evaluate. Default: all criteria.',
},
},
required: ['prompt'],
},
},
{
name: 'get_server_status',
description: `Get PromptArchitect server status and performance metrics.
Use this tool to check:
• Whether AI (Gemini) is available
• Cache hit rate and request statistics
• Average response latency`,
inputSchema: {
type: 'object',
properties: {},
required: [],
},
},
],
};
});
// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
logger.info('Tool called', { name, args });
try {
switch (name) {
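        // Example args (matching the generate_prompt inputSchema above):
        //   { "idea": "summarize weekly sales figures", "template": "analysis", "targetModel": "claude" }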
case 'generate_prompt': {
const input = generatePromptSchema.parse(args);
const result = await generatePrompt(input);
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2),
},
],
};
}
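        // Example args: { "prompt": "<existing prompt>", "feedback": "make it more specific" }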
case 'refine_prompt': {
const input = refinePromptSchema.parse(args);
const result = await refinePrompt(input);
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2),
},
],
};
}
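        // Example args: { "prompt": "<prompt to analyze>", "evaluationCriteria": ["clarity", "structure"] }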
case 'analyze_prompt': {
const input = analyzePromptSchema.parse(args);
const result = await analyzePrompt(input);
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2),
},
],
};
}
case 'get_server_status': {
const stats = getStats();
const status = {
server: 'promptarchitect-mcp',
version: '1.0.0',
apiAvailable: isApiClientAvailable(),
performance: {
totalRequests: stats.totalRequests,
cacheHits: stats.cacheHits,
cacheHitRate: stats.totalRequests > 0
? `${Math.round((stats.cacheHits / stats.totalRequests) * 100)}%`
: 'N/A',
avgLatencyMs: stats.avgLatencyMs,
},
status: isApiClientAvailable() ? 'ready' : 'degraded (using fallbacks)',
};
return {
content: [
{
type: 'text',
text: JSON.stringify(status, null, 2),
},
],
};
}
default:
throw new McpError(
ErrorCode.MethodNotFound,
`Unknown tool: "${name}". Available tools: generate_prompt, refine_prompt, analyze_prompt, get_server_status`
);
}
    } catch (error) {
      if (error instanceof McpError) throw error;
      // Provide user-friendly error messages
      const errorMessage = error instanceof Error ? error.message : 'Unknown error';
      logger.error('Tool execution error', { name, error: errorMessage });
      // Schema validation failures (e.g. zod's .parse throwing) indicate bad input, not a server fault
      if (error instanceof Error && (error.name === 'ZodError' || errorMessage.includes('parse'))) {
        throw new McpError(
          ErrorCode.InvalidParams,
          `Invalid input: ${errorMessage}. Please check the required parameters.`
        );
      }
      // Surface AI/API failures with a hint that fallback generation will be used
      if (errorMessage.includes('API') || errorMessage.includes('Gemini')) {
        throw new McpError(
          ErrorCode.InternalError,
          `AI service error: ${errorMessage}. The server will use fallback generation.`
        );
      }
      throw new McpError(ErrorCode.InternalError, errorMessage);
    }
});
}
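/**
 * Register handlers for listing and reading prompt template resources
 * exposed under the promptarchitect://templates/ URI scheme.
 */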
function registerResourceHandlers(server: Server): void {
// List available resources
server.setRequestHandler(ListResourcesRequestSchema, async () => {
const templates = getAllTemplates();
const categories = getCategories();
const resources = [
// Category listing
{
uri: 'promptarchitect://templates/categories',
name: 'Template Categories',
description: 'List of available template categories',
mimeType: 'application/json',
},
// Individual templates
...templates.map(t => ({
uri: `promptarchitect://templates/${t.id}`,
name: t.name,
description: t.description,
mimeType: 'application/json',
})),
// Category collections
...categories.map(cat => ({
uri: `promptarchitect://templates/category/${cat}`,
name: `${cat.charAt(0).toUpperCase() + cat.slice(1)} Templates`,
description: `All templates in the ${cat} category`,
mimeType: 'application/json',
})),
];
return { resources };
});
// Read resource content
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const { uri } = request.params;
logger.info('Resource requested', { uri });
// Parse URI
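    // Supported forms:
    //   promptarchitect://templates/categories          -> list of category names
    //   promptarchitect://templates/category/<category> -> all templates in that category
    //   promptarchitect://templates/<templateId>        -> a single template by id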
const match = uri.match(/^promptarchitect:\/\/templates\/(.+)$/);
if (!match) {
throw new McpError(ErrorCode.InvalidRequest, `Invalid resource URI: ${uri}`);
}
const path = match[1];
// Handle categories list
if (path === 'categories') {
return {
contents: [
{
uri,
mimeType: 'application/json',
text: JSON.stringify(getCategories(), null, 2),
},
],
};
}
// Handle category collection
if (path.startsWith('category/')) {
const category = path.replace('category/', '');
const templates = getAllTemplates().filter(t => t.category === category);
return {
contents: [
{
uri,
mimeType: 'application/json',
text: JSON.stringify(templates, null, 2),
},
],
};
}
// Handle individual template
const template = getTemplateById(path);
if (!template) {
throw new McpError(ErrorCode.InvalidRequest, `Template not found: ${path}`);
}
return {
contents: [
{
uri,
mimeType: 'application/json',
text: JSON.stringify(template, null, 2),
},
],
};
});
}
/**
* Run the server with stdio transport
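 *
 * Illustrative entry-point usage (assumed bootstrap file, e.g. src/index.ts):
 *   import { runServer } from './server.js';
 *   runServer().catch((error) => {
 *     console.error('Fatal error:', error);
 *     process.exit(1);
 *   });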
*/
export async function runServer(): Promise<void> {
const server = createServer();
const transport = new StdioServerTransport();
await server.connect(transport);
logger.info('PromptArchitect MCP Server started');
  // Handle graceful shutdown on SIGINT and SIGTERM
  const shutdown = async (): Promise<void> => {
    logger.info('Shutting down...');
    await server.close();
    process.exit(0);
  };
  process.on('SIGINT', shutdown);
  process.on('SIGTERM', shutdown);
}
export default createServer;