index.ts•34.3 kB
#!/usr/bin/env node
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ErrorCode,
ListToolsRequestSchema,
McpError,
} from '@modelcontextprotocol/sdk/types.js';
import { GitHubService } from './services/github.js';
import { RefactorService } from './services/refactor.js';
import { OpenAIService } from './services/openai.js';
import { consolidatedTools } from './tools/consolidated.js';
import { ToolResponse, ErrorCodes } from './types/responses.js';
import { getConfig } from './utils/config.js';
import { batchProcessFiles, processSingleFile, validateFilePath, extractFileMetadata, getFileStatistics } from './utils/file-processor.js';
import { log, createPerformanceTimer } from './utils/logger.js';
import { monitoring, monitorTool } from './utils/monitoring.js';
import { z } from 'zod';
// Initialize configuration
// Loaded once at module import; shared via closure by every handler below.
const config = getConfig();

// Log server startup with a redacted config summary — secrets are reported as
// booleans only, never as raw token values.
log.info('CodeCompass MCP Server starting up', {
  nodeVersion: process.version,
  nodeEnv: process.env.NODE_ENV,
  configSummary: {
    hasGitHubToken: !!config.github.token,
    hasOpenRouterKey: !!config.openrouter.apiKey,
    defaultModel: config.openrouter.defaultModel,
    maxResponseTokens: config.response.maxTokens,
    logLevel: config.logging.level,
  },
});

// MCP server identity plus declared capabilities (this server exposes tools only).
const server = new Server(
  {
    name: 'codecompass-mcp',
    version: '1.0.0',
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

// Initialize services with configuration
// Module-level singletons shared by all tool handlers.
const githubService = new GitHubService();
const refactorService = new RefactorService();
const openaiService = new OpenAIService();
// Helper function to create standardized responses
/**
 * Wrap tool output (or a failure) in the standardized ToolResponse envelope.
 *
 * @param data     Payload returned on success (ignored when `error` is set).
 * @param error    Optional error; its `message`/`code`/`details` are surfaced,
 *                 with a contextual suggestion derived from the message text.
 * @param metadata Extra metadata merged into `response.metadata`; `tool` and
 *                 `url` are also echoed into the error context when present.
 */
function createResponse<T>(data: T, error?: any, metadata?: any): ToolResponse<T> {
  const response: ToolResponse<T> = {
    success: !error,
    metadata: {
      // Wall-clock timestamp (ms since epoch), not an elapsed duration.
      processing_time: Date.now(),
      ...metadata,
    },
  };

  if (error) {
    // Enhanced error handling with contextual messages
    const errorMessage = error.message || 'An error occurred';
    const errorCode = error.code || ErrorCodes.PROCESSING_ERROR;

    // Match suggestions case-insensitively so variants such as
    // "Rate Limit exceeded" or "Not Found" are still recognized
    // (the previous matching was case-sensitive and silently missed them).
    const haystack = errorMessage.toLowerCase();
    let suggestion = '';
    if (haystack.includes('rate limit')) {
      suggestion = 'Try reducing request frequency or adding GitHub token for higher limits.';
    } else if (haystack.includes('not found') || haystack.includes('404')) {
      suggestion = 'Verify the repository URL is correct and the repository is publicly accessible.';
    } else if (haystack.includes('timeout')) {
      suggestion = 'Use chunking mode for large repositories: set chunk_mode=true in options.';
    } else if (haystack.includes('token')) {
      suggestion = 'Check your GitHub and OpenRouter API tokens in environment variables.';
    } else if (haystack.includes('permission') || haystack.includes('403')) {
      suggestion = 'Ensure your GitHub token has the necessary permissions for this repository.';
    }

    response.error = {
      code: errorCode,
      message: errorMessage,
      details: error.details || error,
      suggestion: suggestion || 'Check the API documentation for more details.',
      timestamp: new Date().toISOString(),
      context: {
        tool: metadata?.tool || 'unknown',
        url: metadata?.url || 'unknown'
      }
    };
  } else {
    response.data = data;
  }
  return response;
}
// Helper function to chunk large responses
function chunkResponse<T>(response: ToolResponse<T>, chunkIndex: number, chunkSize: string): ToolResponse<T> {
  // Failed or empty responses have nothing to chunk.
  if (!response.success || !response.data) {
    return response;
  }

  const data = response.data as any;
  const result = { ...response };
  const payload = { ...data };

  // Resolve per-chunk limits from configuration, defaulting to 'medium'.
  const sizeTable = config.response.chunkSizes;
  const limits = sizeTable[chunkSize as keyof typeof sizeTable] || sizeTable.medium;

  if (data.structure?.keyFiles) {
    const entries = Object.entries(data.structure.keyFiles);
    const start = chunkIndex * limits.filesPerChunk;
    const end = start + limits.filesPerChunk;
    const slice = entries.slice(start, end);

    // Clip oversized file contents inside the chunk.
    const clip = (content: unknown) =>
      typeof content === 'string' && content.length > limits.fileContent
        ? content.substring(0, limits.fileContent) + '\n\n... [Content truncated for chunking] ...'
        : content;

    payload.structure = {
      ...data.structure,
      keyFiles: Object.fromEntries(slice.map(([name, content]) => [name, clip(content)])),
    };

    // Pagination metadata so callers can request the next chunk.
    const hasMore = end < entries.length;
    payload.chunkInfo = {
      chunkIndex,
      chunkSize,
      totalFiles: entries.length,
      totalChunks: Math.ceil(entries.length / limits.filesPerChunk),
      filesInChunk: slice.length,
      hasMore,
      nextChunkIndex: hasMore ? chunkIndex + 1 : null,
    };
  }

  result.data = payload;
  return result;
}
// Helper function to truncate large responses (fallback for non-chunked mode)
/**
 * If the serialized response exceeds the (rough) token budget, return a copy
 * with oversized file contents and deep file trees trimmed down.
 *
 * @param response             Response envelope to size-check.
 * @param maxTokens            Approximate token budget (1 token ≈ 4 chars).
 * @param maxFileContentLength Per-file character cap applied to keyFiles.
 * @returns The original response when under budget, otherwise a truncated
 *          copy. The input object is never mutated (the previous version
 *          truncated keyFiles/metadata in place, corrupting the caller's data).
 */
function truncateResponse<T>(response: ToolResponse<T>, maxTokens: number = 25000, maxFileContentLength: number = 1000): ToolResponse<T> {
  const jsonString = JSON.stringify(response, null, 2);
  // Rough token estimation: 1 token ≈ 4 characters
  const estimatedTokens = jsonString.length / 4;
  if (estimatedTokens <= maxTokens) {
    return response;
  }

  // If response is too large, truncate data while preserving structure
  const truncatedResponse = { ...response };
  if (truncatedResponse.success && truncatedResponse.data) {
    // Copy the layers we are about to modify so the input stays untouched.
    const data: any = { ...(truncatedResponse.data as any) };
    if (data.structure) {
      data.structure = { ...data.structure };
    }

    // Truncate file contents first
    if (data.structure?.keyFiles) {
      const keyFiles: Record<string, any> = { ...data.structure.keyFiles };
      Object.keys(keyFiles).forEach(filename => {
        if (keyFiles[filename] && keyFiles[filename].length > maxFileContentLength) {
          keyFiles[filename] = keyFiles[filename].substring(0, maxFileContentLength) +
            '\n\n... [Content truncated due to size limits] ...';
        }
      });
      data.structure.keyFiles = keyFiles;
    }

    // Truncate file tree if still too large; builds new nodes rather than
    // mutating the originals (max 50 entries per level, max depth 3).
    if (data.structure?.fileTree) {
      const truncateFileTree = (tree: any[], maxDepth: number = 3, currentDepth: number = 0): any[] => {
        if (currentDepth >= maxDepth) {
          return [{ name: '...', type: 'truncated', message: 'Directory tree truncated due to size limits' }];
        }
        return tree.slice(0, 50).map(node => ({
          ...node,
          children: node.children ? truncateFileTree(node.children, maxDepth, currentDepth + 1) : undefined
        }));
      };
      data.structure.fileTree = truncateFileTree(data.structure.fileTree);
    }

    // Add truncation warning on a fresh metadata object, not the caller's.
    data.metadata = {
      ...(data.metadata || {}),
      truncated: true,
      truncationReason: 'Response size exceeded maximum token limit',
      maxTokens: maxTokens,
      estimatedTokens: Math.round(estimatedTokens),
      suggestion: 'Use chunk_mode=true for complete data access or adjust max_response_tokens and max_file_content_length',
    };

    truncatedResponse.data = data;
  }
  return truncatedResponse;
}
// Helper function to format tool responses for MCP
function formatToolResponse<T>(
  response: ToolResponse<T>,
  maxTokens: number = 25000,
  maxFileContentLength: number = 1000,
  chunkMode: boolean = false,
  chunkIndex: number = 0,
  chunkSize: string = 'medium'
) {
  // Chunk mode slices the payload; otherwise fall back to size-based truncation.
  const processedResponse = chunkMode
    ? chunkResponse(response, chunkIndex, chunkSize)
    : truncateResponse(response, maxTokens, maxFileContentLength);

  // MCP tool results carry their payload as a single JSON text part.
  return {
    content: [
      {
        type: 'text',
        text: JSON.stringify(processedResponse, null, 2),
      },
    ],
  };
}
// tools/list: expose the consolidated tool catalog to MCP clients.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: consolidatedTools,
  };
});
// tools/call: route the request to the matching handler, with monitoring
// around every invocation and a standardized error envelope on failure.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  const requestId = monitoring.generateRequestId();
  const startTime = Date.now();

  // Route table: tool name -> handler function.
  const routes: Record<string, (toolArgs: any) => Promise<any>> = {
    // Core Data Tools (6 tools)
    get_repository_info: handleGetRepositoryInfo,
    get_file_tree: handleGetFileTree,
    search_repository: handleSearchRepository,
    get_file_content: handleGetFileContent,
    analyze_dependencies: handleAnalyzeDependencies,
    analyze_codebase: handleAnalyzeCodebase,
    // AI-Enhanced Tools (3 tools)
    review_code: handleReviewCode,
    explain_code: handleExplainCode,
    suggest_improvements: handleSuggestImprovements,
    // Transformation Tools (1 tool)
    transform_code: handleTransformCode,
    // Utility Tools (1 tool)
    health_check: handleHealthCheck,
  };

  // Start monitoring this request
  monitoring.startRequest(name, requestId);

  try {
    const handler = routes[name];
    if (!handler) {
      throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
    }
    const result = await handler(args);

    // Mark request as successful
    monitoring.completeRequest(name, startTime, true, undefined, requestId);
    return result;
  } catch (error) {
    // Mark request as failed, then hand back a standardized error envelope.
    monitoring.completeRequest(name, startTime, false, (error as Error).message, requestId);
    return formatToolResponse(createResponse(null, error));
  }
});
// Tool handlers using CodeCompass-main core functionality

// get_repository_info: basic repository metadata plus optional stats,
// language breakdown, and topics.
async function handleGetRepositoryInfo(args: any) {
  const { url, options = {} } = args;
  const meta = { tool: 'get_repository_info', url };
  try {
    const info = await githubService.getRepositoryInfo(url);

    // Base fields, extended below with the optional sections callers asked for.
    const repository: any = {
      name: info.name,
      description: info.description,
      owner: info.owner,
      language: info.language,
      defaultBranch: info.defaultBranch,
      createdAt: info.createdAt,
      updatedAt: info.updatedAt,
      license: info.license,
    };
    if (options.include_stats) {
      repository.stats = {
        stars: info.stars,
        fileCount: info.fileCount,
        lineCount: info.lineCount,
      };
    }
    if (options.include_languages) {
      repository.languages = info.languages;
    }
    if (options.include_topics) {
      repository.topics = []; // Add topics to GitHubRepoInfo interface if needed
    }

    return formatToolResponse(createResponse({ repository }, null, meta));
  } catch (error) {
    return formatToolResponse(createResponse(null, error, meta));
  }
}
// get_file_tree: repository file listing plus the filter options that were
// (or would be) applied, echoed back as metadata.
async function handleGetFileTree(args: any) {
  const { url, options = {} } = args;
  const meta = { tool: 'get_file_tree', url };
  try {
    const tree = await githubService.getFileTree(url);
    const payload = {
      file_tree: tree,
      metadata: {
        max_depth: options.max_depth || 10,
        include_hidden: options.include_hidden || false,
        total_files: tree.length,
        filtered_extensions: options.file_extensions || null,
        excluded_paths: options.exclude_paths || ['node_modules', 'dist', 'build', '.git'],
      },
    };
    return formatToolResponse(createResponse(payload, null, meta));
  } catch (error) {
    return formatToolResponse(createResponse(null, error, meta));
  }
}
// Legacy handler - remove after testing
// NOTE(review): not referenced by the tools/call route switch, so this is
// currently dead code kept for the deprecation window.
async function handleFetchRepositoryData(args: any) {
  try {
    const { url, options = {} } = args;

    // Extract size control options with config defaults
    const maxTokens = options.max_response_tokens || config.response.maxTokens;
    const maxFileContentLength = options.max_file_content_length || config.response.maxFileContentLength;
    const chunkMode = options.chunk_mode || false;
    const chunkIndex = options.chunk_index || 0;
    const chunkSize = options.chunk_size || 'medium';

    // Use CodeCompass-main's core GitHub service functionality
    // (two sequential GitHub calls: metadata, then full analysis).
    const repositoryInfo = await githubService.getRepositoryInfo(url);
    const analysis = await githubService.analyzeRepository(url);

    // Merge metadata, structure, and analysis into a single payload.
    const result = {
      info: repositoryInfo,
      structure: {
        fileCount: repositoryInfo.fileCount,
        lineCount: repositoryInfo.lineCount,
        fileTree: repositoryInfo.fileTree,
        keyFiles: repositoryInfo.keyFiles,
      },
      dependencies: analysis.dependencies,
      architecture: analysis.architecture,
    };
    const response = createResponse(result);
    return formatToolResponse(response, maxTokens, maxFileContentLength, chunkMode, chunkIndex, chunkSize);
  } catch (error) {
    // `url` from the destructuring above is out of scope here; read it off args.
    const response = createResponse(null, error, { tool: 'fetch_repository_data', url: args.url });
    return formatToolResponse(response);
  }
}
/**
 * search_repository: run a search (text by default) across a repository.
 *
 * The previous implementation also fetched full repository info here but never
 * used it; that extra GitHub API call has been removed.
 */
async function handleSearchRepository(args: any) {
  try {
    const { url, query, search_type = 'text', options = {} } = args;
    const searchResults = await githubService.searchInRepository(url, query, {
      type: search_type,
      ...options,
    });
    const response = createResponse(searchResults);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error, { tool: 'search_repository', url: args.url, query: args.query });
    return formatToolResponse(response);
  }
}
/**
 * get_file_content: fetch and post-process a set of files from a repository.
 *
 * Pipeline: validate every path -> fetch each file from GitHub (collecting
 * per-path fetch errors) -> batch-process the fetched contents -> merge
 * processing results and fetch failures into a single per-path map.
 */
async function handleGetFileContent(args: any) {
  try {
    const { url, file_paths, options = {} } = args;

    // Validate file paths first; any invalid path aborts the whole request.
    const pathValidationErrors: string[] = [];
    for (const filePath of file_paths) {
      const validation = validateFilePath(filePath);
      if (!validation.valid) {
        pathValidationErrors.push(`${filePath}: ${validation.error}`);
      }
    }
    if (pathValidationErrors.length > 0) {
      throw new Error(`Invalid file paths detected:\n${pathValidationErrors.join('\n')}`);
    }

    // Fetch file contents from GitHub, one file at a time; individual fetch
    // failures are recorded rather than aborting the batch.
    const fileContents: Array<{ path: string; content: string }> = [];
    const fetchErrors: Record<string, string> = {};
    for (const filePath of file_paths) {
      try {
        const content = await githubService.getFileContent(url, filePath);
        fileContents.push({ path: filePath, content });
      } catch (error: any) {
        fetchErrors[filePath] = error.message;
      }
    }

    // Process files using batch processing
    const batchOptions = {
      maxConcurrent: options.max_concurrent || config.limits.maxConcurrentRequests,
      continueOnError: options.continue_on_error !== false,
      validatePaths: false, // Already validated above
      includeMetadata: options.include_metadata !== false,
      maxFileSize: options.max_size || config.limits.maxFileSize,
      allowedExtensions: options.file_extensions,
      excludePatterns: options.exclude_patterns,
    };
    const batchResult = await batchProcessFiles(fileContents, batchOptions);

    // Combine results with fetch errors
    const results: Record<string, any> = {};

    // Add successful and failed processing results
    batchResult.results.forEach(result => {
      if (result.success) {
        results[result.filePath] = {
          content: result.content,
          metadata: result.metadata,
          size: result.metadata?.size || 0,
          truncated: result.metadata?.size ? result.metadata.size > (options.max_size || config.limits.maxFileSize) : false,
        };
      } else {
        results[result.filePath] = {
          error: result.error?.message || 'Processing failed',
          details: result.error?.details,
        };
      }
    });

    // Add fetch errors
    Object.entries(fetchErrors).forEach(([filePath, error]) => {
      results[filePath] = {
        error: `Failed to fetch: ${error}`,
      };
    });

    // Add processing statistics (computed over successful files only)
    const statistics = getFileStatistics(batchResult.results.filter(r => r.success));

    const response = createResponse({
      files: results,
      summary: {
        ...batchResult.summary,
        fetchErrors: Object.keys(fetchErrors).length,
        statistics,
      },
    });
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error, { tool: 'get_file_content', url: args.url });
    return formatToolResponse(response);
  }
}
/**
 * analyze_codebase: structural analysis of a repository (optionally limited to
 * specific files).
 */
async function handleAnalyzeCodebase(args: any) {
  try {
    const { url, file_paths, options = {} } = args;
    const analysis = await githubService.analyzeCodeStructure(url, file_paths, options);
    return formatToolResponse(createResponse(analysis));
  } catch (error) {
    // Attach tool/url context so createResponse can report where the failure
    // occurred (consistent with handleGetFileContent / handleSearchRepository).
    return formatToolResponse(createResponse(null, error, { tool: 'analyze_codebase', url: args.url }));
  }
}
/**
 * analyze_dependencies: dependency report for a repository.
 */
async function handleAnalyzeDependencies(args: any) {
  try {
    const { url, options = {} } = args;
    const dependencies = await githubService.analyzeDependencies(url);
    return formatToolResponse(createResponse(dependencies));
  } catch (error) {
    // Attach tool/url context for clearer error reporting (consistent with
    // the other data handlers).
    return formatToolResponse(createResponse(null, error, { tool: 'analyze_dependencies', url: args.url }));
  }
}
/**
 * calculate_metrics: code metrics for a repository.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable; confirm whether it should be registered or removed.
 */
async function handleCalculateMetrics(args: any) {
  try {
    const { url, options = {} } = args;
    const metrics = await githubService.calculateMetrics(url, options);
    return formatToolResponse(createResponse(metrics));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'calculate_metrics', url: args.url }));
  }
}
/**
 * transform_code: apply a list of transformations to a code snippet,
 * optionally translating it to another language.
 */
async function handleTransformCode(args: any) {
  try {
    const { code, transformations, language, target_language, options = {} } = args;
    const result = await refactorService.transformCode(
      code,
      transformations,
      language,
      target_language,
      options
    );
    return formatToolResponse(createResponse(result));
  } catch (error) {
    // Attach tool context for clearer error reporting (this tool has no url).
    return formatToolResponse(createResponse(null, error, { tool: 'transform_code' }));
  }
}
/**
 * extract_components: pull reusable components/functions/utilities out of a
 * repository.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable; `options` is also accepted but not forwarded to the service.
 */
async function handleExtractComponents(args: any) {
  try {
    const { url, extraction_types = ['components', 'functions', 'utilities'], options = {} } = args;
    const components = await refactorService.extractReusableComponents(url, extraction_types);
    return formatToolResponse(createResponse(components));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'extract_components', url: args.url }));
  }
}
/**
 * adapt_code_structure: restructure a repository's code toward a target layout.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable.
 */
async function handleAdaptCodeStructure(args: any) {
  try {
    const { url, target_structure, options = {} } = args;
    const result = await refactorService.adaptCodeStructure(url, target_structure, options);
    return formatToolResponse(createResponse(result));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'adapt_code_structure', url: args.url }));
  }
}
/**
 * generate_project_template: derive boilerplate of a given template type from
 * an existing repository.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable.
 */
async function handleGenerateProjectTemplate(args: any) {
  try {
    const { url, template_type, options = {} } = args;
    const template = await refactorService.generateBoilerplate(url, template_type, options);
    return formatToolResponse(createResponse(template));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'generate_project_template', url: args.url }));
  }
}
/**
 * analyze_architecture: architecture analysis for a repository.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable.
 */
async function handleAnalyzeArchitecture(args: any) {
  try {
    const { url, options = {} } = args;
    const architecture = await githubService.analyzeArchitecturePublic(url, options);
    return formatToolResponse(createResponse(architecture));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'analyze_architecture', url: args.url }));
  }
}
/**
 * compare_implementations: compare multiple repositories against a set of
 * criteria.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable.
 */
async function handleCompareImplementations(args: any) {
  try {
    const { implementations, comparison_criteria, options = {} } = args;
    const comparison = await githubService.compareRepositories(implementations, comparison_criteria, options);
    return formatToolResponse(createResponse(comparison));
  } catch (error) {
    // Attach tool context for clearer error reporting (no single url here).
    return formatToolResponse(createResponse(null, error, { tool: 'compare_implementations' }));
  }
}
/**
 * validate_code_quality: run quality validations against a repository.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable.
 */
async function handleValidateCodeQuality(args: any) {
  try {
    const { url, validation_types, options = {} } = args;
    const validation = await githubService.validateCodeQuality(url, validation_types, options);
    return formatToolResponse(createResponse(validation));
  } catch (error) {
    // Attach tool/url context for clearer error reporting.
    return formatToolResponse(createResponse(null, error, { tool: 'validate_code_quality', url: args.url }));
  }
}
/**
 * batch_process: execute a list of tool operations sequentially and collect
 * per-operation results and timings. Failures are recorded per operation;
 * the batch continues.
 *
 * NOTE(review): not referenced by the tools/call route switch — currently
 * unreachable. Also, `server.request` sends a request over the transport
 * (i.e. to the connected client) rather than invoking this server's own
 * handlers — verify this actually re-enters the local tools; it looks suspect.
 */
async function handleBatchProcess(args: any) {
  try {
    const { operations, options = {} } = args;
    const results = [];
    const startTime = Date.now();

    // Operations run strictly one after another, each with its own timer.
    for (const operation of operations) {
      const operationStartTime = Date.now();
      try {
        // Recursively call the appropriate handler
        const result = await server.request(
          { method: 'tools/call', params: { name: operation.tool, arguments: operation.params } } as any,
          {} as any
        );
        results.push({
          id: operation.id,
          success: true,
          data: result,
          processingTime: Date.now() - operationStartTime,
        });
      } catch (error: any) {
        // Record the failure and keep processing the remaining operations.
        results.push({
          id: operation.id,
          success: false,
          error: error.message,
          processingTime: Date.now() - operationStartTime,
        });
      }
    }

    // Summary: echo of the requested operations plus aggregate counts/timing.
    const batchResult = {
      operations: operations.map((op: any) => ({ type: op.tool, params: op.params, id: op.id })),
      results,
      totalTime: Date.now() - startTime,
      successCount: results.filter(r => r.success).length,
      failureCount: results.filter(r => !r.success).length,
    };
    const response = createResponse(batchResult);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}
/**
 * health_check: report server health from the monitoring subsystem, extended
 * with any additionally requested check categories, and optional metrics,
 * performance insights, and recent log entries.
 */
async function handleHealthCheck(args: any) {
  try {
    const { checks = ['api-limits', 'system-health', 'monitoring'], options = {} } = args;

    // Get comprehensive health status from monitoring system
    const monitoringHealth = monitoring.getHealthStatus();
    const serverMetrics = monitoring.getMetrics();

    const health: any = {
      status: monitoringHealth.status,
      timestamp: new Date().toISOString(),
      checks: { ...monitoringHealth.checks },
      // Detailed metrics are opt-in to keep the default payload small.
      metrics: options.include_metrics ? {
        uptime: serverMetrics.uptime,
        memory: serverMetrics.memory,
        version: '1.0.0',
        requestCount: serverMetrics.requestCount,
        errorCount: serverMetrics.errorCount,
        averageResponseTime: serverMetrics.responseTime.average,
        toolUsage: serverMetrics.toolUsage,
      } : undefined,
    };

    // Add additional checks based on requested types.
    // NOTE(review): the default 'system-health' entry has no case below, so it
    // is a no-op here — presumably already covered by monitoringHealth.checks;
    // confirm.
    for (const check of checks) {
      switch (check) {
        case 'api-limits':
          // GitHub rate-limit probe; failures are reported, not thrown.
          try {
            health.checks[check] = await githubService.checkApiLimits();
          } catch (error: any) {
            health.checks[check] = { status: 'error', error: error.message };
          }
          break;
        case 'monitoring':
          health.checks[check] = {
            status: 'healthy',
            totalRequests: serverMetrics.requestCount,
            // Whole-percentage error rate, guarded against divide-by-zero.
            errorRate: serverMetrics.requestCount > 0 ? Math.round((serverMetrics.errorCount / serverMetrics.requestCount) * 100) : 0,
            uptime: serverMetrics.uptime,
            // Heap utilization as a whole percentage.
            memoryUsage: Math.round((serverMetrics.memory.heapUsed / serverMetrics.memory.heapTotal) * 100),
          };
          break;
        case 'dependencies':
          // Static stub — no real dependency probing is performed.
          health.checks[check] = { status: 'healthy' };
          break;
        case 'configuration':
          // Secrets are reported as booleans only.
          health.checks[check] = {
            status: 'healthy',
            hasGitHubToken: !!config.github.token,
            hasOpenRouterKey: !!config.openrouter.apiKey,
            logLevel: config.logging.level,
            maxResponseTokens: config.response.maxTokens,
          };
          break;
      }
    }

    // Add performance insights if requested
    if (options.include_insights) {
      const insights = monitoring.getPerformanceInsights();
      health.insights = insights;
    }

    // Add recent logs if requested (last 10 entries only)
    if (options.include_logs) {
      const logBuffer = log.getLogBuffer();
      health.recentLogs = logBuffer.slice(-10);
    }

    const response = createResponse(health);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}
// AI-Enhanced Tool Handlers

/**
 * review_code: AI-assisted review of selected files (or the repository's key
 * files) with configurable focus areas.
 *
 * The unused `focusAreas` local from the previous version (a comma-joined
 * string that was never referenced) has been removed, and the catch now
 * attaches tool/url context for clearer error reporting.
 */
async function handleReviewCode(args: any) {
  try {
    const { url, file_paths, review_focus = ['security', 'performance', 'maintainability'], options = {} } = args;

    // Get repository info and code content
    const repoInfo = await githubService.getRepositoryInfo(url);

    let filesToReview: Record<string, string> = {};
    if (file_paths && file_paths.length > 0) {
      // Fetch the explicitly requested files; unreadable ones are skipped
      // (best-effort) rather than failing the whole review.
      for (const filePath of file_paths) {
        try {
          const content = await githubService.getFileContent(url, filePath);
          filesToReview[filePath] = content;
        } catch (error) {
          // Skip files that can't be fetched
        }
      }
    } else {
      // Use key files from repository
      filesToReview = repoInfo.keyFiles;
    }

    if (Object.keys(filesToReview).length === 0) {
      throw new Error('No files found to review');
    }

    // Prepare code for AI review: one delimited section per file.
    const codeContext = Object.entries(filesToReview)
      .map(([path, content]) => `--- ${path} ---\n${content}`)
      .join('\n\n');

    // Generate AI review with specified model
    const aiReviewResult = await openaiService.generateCodeReview(
      codeContext,
      repoInfo.language || 'javascript',
      review_focus,
      options.ai_model
    );

    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      review: {
        files_reviewed: Object.keys(filesToReview),
        focus_areas: review_focus,
        ai_model_used: aiReviewResult.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        analysis: aiReviewResult.content,
        severity_threshold: options.severity_threshold || 'medium',
        timestamp: new Date().toISOString(),
        model_warning: aiReviewResult.warning,
      },
      // Placeholder buckets for structured recommendations (not yet populated).
      recommendations: {
        priority_fixes: [],
        suggestions: [],
        best_practices: [],
      },
    };
    return formatToolResponse(createResponse(result));
  } catch (error) {
    return formatToolResponse(createResponse(null, error, { tool: 'review_code', url: args.url }));
  }
}
/**
 * explain_code: generate an AI explanation of selected files (or the
 * repository's key files) at a requested depth ('overview', 'detailed',
 * 'tutorial', 'integration', or 'architecture').
 */
async function handleExplainCode(args: any) {
  try {
    const { url, file_paths, explanation_type = 'overview', options = {} } = args;

    // Get repository info and code content
    const repoInfo = await githubService.getRepositoryInfo(url);

    let filesToExplain: Record<string, string> = {};
    if (file_paths && file_paths.length > 0) {
      // Get specific files; unreadable ones are skipped (best-effort).
      for (const filePath of file_paths) {
        try {
          const content = await githubService.getFileContent(url, filePath);
          filesToExplain[filePath] = content;
        } catch (error) {
          // Skip files that can't be fetched
        }
      }
    } else {
      // Use key files from repository
      filesToExplain = repoInfo.keyFiles;
    }

    if (Object.keys(filesToExplain).length === 0) {
      throw new Error('No files found to explain');
    }

    // Generate AI explanation based on type
    let aiExplanation: string;
    let aiExplanationResult: { content: string; modelUsed: string; warning?: string };
    switch (explanation_type) {
      case 'architecture':
        aiExplanation = await openaiService.explainArchitecture(url, repoInfo);
        // For architecture, create a mock result for consistency
        // NOTE(review): modelUsed here reflects the requested model (or the
        // hard-coded default), not necessarily what explainArchitecture used.
        aiExplanationResult = {
          content: aiExplanation,
          modelUsed: options.ai_model || 'anthropic/claude-3.5-sonnet',
          warning: undefined
        };
        break;
      case 'overview':
      case 'detailed':
      case 'tutorial':
      case 'integration':
      default:
        // All remaining types share one path: build a type-specific prompt and
        // chat with the repository context.
        // Create a prompt for the specific explanation type
        const codeContext = Object.entries(filesToExplain)
          .map(([path, content]) => `--- ${path} ---\n${content}`)
          .join('\n\n');
        // The template below is part of the runtime prompt; its flush-left
        // line layout is intentional and must not be re-indented.
        const prompt = `Please provide a ${explanation_type} explanation of this ${repoInfo.language || 'code'} repository:
Repository: ${repoInfo.name}
Description: ${repoInfo.description || 'No description'}
Language: ${repoInfo.language || 'Multiple'}
Code:
${codeContext}
Please focus on:
${options.focus_on_patterns ? '- Design patterns and architecture' : ''}
${options.include_examples ? '- Code examples and usage' : ''}
${options.include_diagrams ? '- Visual diagrams where helpful' : ''}
Target audience: ${options.target_audience || 'intermediate'}`;
        aiExplanationResult = await openaiService.chatWithRepository(url, prompt, undefined, options.ai_model);
        aiExplanation = aiExplanationResult.content;
        break;
    }

    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      explanation: {
        type: explanation_type,
        files_analyzed: Object.keys(filesToExplain),
        ai_model_used: aiExplanationResult.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        target_audience: options.target_audience || 'intermediate',
        content: aiExplanation,
        timestamp: new Date().toISOString(),
        model_warning: aiExplanationResult.warning,
      },
      metadata: {
        file_count: Object.keys(filesToExplain).length,
        total_lines: Object.values(filesToExplain).reduce((sum, content) => sum + content.split('\n').length, 0),
      },
    };
    const response = createResponse(result);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}
/**
 * suggest_improvements: AI-generated refactoring plan for selected files (or
 * the repository's key files), driven by a list of refactoring goals.
 */
async function handleSuggestImprovements(args: any) {
  try {
    const { url, file_paths, refactoring_goals = ['modernize', 'maintainability'], target_framework, options = {} } = args;

    // Repository metadata plus the code to analyze.
    const repoInfo = await githubService.getRepositoryInfo(url);

    let filesToRefactor: Record<string, string> = {};
    if (file_paths?.length) {
      // Explicitly requested files; unreadable ones are skipped (best-effort).
      for (const filePath of file_paths) {
        try {
          filesToRefactor[filePath] = await githubService.getFileContent(url, filePath);
        } catch (error) {
          // Best-effort: unreadable files are simply skipped.
        }
      }
    } else {
      // Fall back to the repository's key files.
      filesToRefactor = repoInfo.keyFiles;
    }

    const analyzedPaths = Object.keys(filesToRefactor);
    if (analyzedPaths.length === 0) {
      throw new Error('No files found to analyze for refactoring');
    }

    // Describe the migration target for the AI planner.
    const targetProject = {
      framework: target_framework || 'Not specified',
      language: repoInfo.language || 'javascript',
      constraints: [],
      timeline: 'Not specified',
    };
    const aiSuggestionsResult = await openaiService.suggestRefactoringPlan(url, targetProject, refactoring_goals, options.ai_model);

    const totalLines = Object.values(filesToRefactor)
      .reduce((sum, content) => sum + content.split('\n').length, 0);

    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      refactoring: {
        goals: refactoring_goals,
        target_framework: target_framework,
        files_analyzed: analyzedPaths,
        ai_model_used: aiSuggestionsResult.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        suggestions: aiSuggestionsResult.content,
        priority_level: options.priority_level || 'medium',
        timestamp: new Date().toISOString(),
        model_warning: aiSuggestionsResult.warning,
      },
      metadata: {
        file_count: analyzedPaths.length,
        total_lines: totalLines,
        estimated_effort: options.estimate_effort ? 'Will be provided by AI' : null,
      },
    };
    return formatToolResponse(createResponse(result));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}
// Start the server
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
}

// A fatal startup failure should terminate the process with a non-zero exit
// code so supervisors/clients notice, instead of merely logging and leaving a
// half-initialized process behind.
main().catch((error) => {
  console.error(error);
  process.exit(1);
});