
merge_content

Merges content from multiple files into a single output, with options to compress the result, honor .gitignore rules, stream large files, and customize which files are processed — useful for efficient code-repository management.

Instructions

Merges content from multiple files into a single output file.

Input Schema

| Name | Required | Description | Default |
| --- | --- | --- | --- |
| `path` | Yes | The target file or directory path. | — |
| `compress` | No | Whether to compress the output. | `false` |
| `custom_blacklist` | No | Custom blacklist items. | `[]` |
| `ignore_git` | No | Whether to ignore the `.git` directory. | `true` |
| `max_files` | No | Maximum number of files to process. | `1000` |
| `use_cache` | No | Whether to use file content cache. | `true` |
| `use_gitignore` | No | Whether to use `.gitignore` rules. | `false` |
| `use_streams` | No | Whether to use stream processing for large files. | `true` |

Defaults are taken from the `handleRequest` implementation shown below.

Input Schema (JSON Schema)

{ "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": false, "properties": { "compress": { "description": "Whether to compress the output.", "type": "boolean" }, "custom_blacklist": { "description": "Custom blacklist items.", "items": { "type": "string" }, "type": "array" }, "ignore_git": { "description": "Whether to ignore the .git directory.", "type": "boolean" }, "max_files": { "description": "Maximum number of files to process.", "type": "number" }, "path": { "description": "The target file or directory path.", "type": "string" }, "use_cache": { "description": "Whether to use file content cache.", "type": "boolean" }, "use_gitignore": { "description": "Whether to use .gitignore rules.", "type": "boolean" }, "use_streams": { "description": "Whether to use stream processing for large files.", "type": "boolean" } }, "required": [ "path" ], "type": "object" }

Implementation Reference

  • The core `handleRequest` function implementing the merge_content tool. It lists files, reads their contents with optimized readers, merges them under per-file headers, supports compression, caching, and streaming, and reports memory usage and performance metrics.
```javascript
async function handleRequest(parameters) {
  console.error('merge_content: Starting execution');
  const startTime = Date.now();
  // Start memory monitoring (check every 10 seconds)
  memoryMonitor.start(10000);

  const {
    path: targetPath,
    compress = false,
    use_gitignore,
    ignore_git,
    custom_blacklist,
    use_cache = true,
    use_streams = true,
    max_files = 1000 // Cap the number of files to process
  } = parameters;

  if (!targetPath) {
    memoryMonitor.stop();
    throw new Error("Missing required parameter: 'path'.");
  }

  // Resolve to absolute path
  const absolutePath = path.resolve(targetPath);
  console.error(`merge_content: Resolved path to ${absolutePath}`);

  let rootPath = absolutePath; // Assume path is a directory initially
  let filesToProcess = [];

  // Validate path existence and determine if it's a file or directory
  try {
    const stats = await fs.stat(absolutePath);
    console.error(`merge_content: Path exists, checking type`);
    if (stats.isDirectory()) {
      // Path is a directory, list files within it
      console.error(`merge_content: Path is a directory, listing files`);
      rootPath = absolutePath; // Keep rootPath as the directory itself
      // For performance, default to false for gitignore to speed up processing
      filesToProcess = await listFiles(absolutePath, {
        useGitignore: use_gitignore || false,
        ignoreGit: ignore_git ?? true, // ?? (not ||) so an explicit false is honored
        customBlacklist: custom_blacklist || []
      });
      // Limit the number of files to prevent memory exhaustion
      if (filesToProcess.length > max_files) {
        console.error(`merge_content: Limiting files from ${filesToProcess.length} to ${max_files}`);
        filesToProcess = filesToProcess.slice(0, max_files);
      }
      console.error(`merge_content: Found ${filesToProcess.length} files in directory`);
    } else if (stats.isFile()) {
      // Path is a single file
      console.error(`merge_content: Path is a file`);
      rootPath = path.dirname(absolutePath); // Set rootPath to the parent directory
      const relativeFilePath = path.basename(absolutePath);
      // Skip gitignore checks for single files to improve performance
      filesToProcess.push(relativeFilePath);
      console.error(`merge_content: Added single file to process: ${relativeFilePath}`);
      // Check if it's a binary file
      const fileExtension = path.extname(relativeFilePath).toLowerCase();
      if (BINARY_EXTENSIONS.has(fileExtension)) {
        filesToProcess = []; // Clear if binary
        console.error(`merge_content: Skipping binary file: ${relativeFilePath}`);
      }
    } else {
      // Path exists but is not a file or directory (e.g., socket, fifo)
      memoryMonitor.stop();
      throw new Error(`Path '${targetPath}' is not a file or directory.`);
    }
  } catch (error) {
    memoryMonitor.stop();
    if (error.code === 'ENOENT') {
      throw new Error(`Path '${targetPath}' not found.`);
    }
    throw new Error(`Error accessing path '${targetPath}': ${error.message}`);
  }

  if (filesToProcess.length === 0) {
    // No files left after filtering (or it was an ignored/binary single file)
    console.error(`merge_content: No files to process, returning empty content`);
    memoryMonitor.stop();
    return { merged_content: "" };
  }

  // Read the content of the filtered files using the optimized reader,
  // which supports caching and stream processing
  console.error(`merge_content: Reading content of ${filesToProcess.length} files`);
  const fileContentsMap = await readFiles(filesToProcess, rootPath, {
    useCache: use_cache,
    useStreams: use_streams,
    progressCallback: (progress) => {
      if (progress.percent % 10 === 0) { // Log progress every 10%
        console.error(`merge_content: Reading progress ${progress.percent}% (${progress.completed}/${progress.total})`);
      }
    }
  });

  // Combine contents with headers, ensuring consistent order
  console.error(`merge_content: Combining file contents`);
  let combinedContent = "";
  // Sort file paths before merging for deterministic output
  const sortedRelativePaths = [...fileContentsMap.keys()].sort((a, b) => a.localeCompare(b));
  let filesProcessed = 0;
  sortedRelativePaths.forEach(relativeFilePath => {
    const content = fileContentsMap.get(relativeFilePath);
    if (content !== undefined) { // Check if file read was successful
      // Add a header to distinguish file contents
      const safeFilePath = relativeFilePath.replace(/\\/g, '/');
      combinedContent += `=== File Path: ${safeFilePath} ===\n\n`;
      // No need to apply makeJsonSafe here; we're just building the combined
      // content. The final result is handled by the MCP server's
      // adaptToolResult function.
      combinedContent += content;
      combinedContent += '\n\n' + '='.repeat(50) + '\n\n';
      filesProcessed++;
    }
  });
  console.error(`merge_content: Successfully processed ${filesProcessed} files`);

  // Apply compression if requested
  let finalContent = combinedContent.trim(); // Trim final whitespace
  if (compress) {
    console.error(`merge_content: Compressing content`);
    finalContent = compressContent(finalContent);
  }

  // Capture memory usage and execution time, then stop monitoring
  const memoryUsage = memoryMonitor.getMemoryUsage();
  const executionTime = Date.now() - startTime;
  memoryMonitor.stop();
  console.error(`merge_content: Execution completed in ${executionTime}ms, memory used: ${memoryUsage.heapUsedMB}MB`);

  // Return the result along with performance metrics
  return {
    merged_content: finalContent,
    performance: {
      executionTime,
      filesProcessed,
      totalFiles: filesToProcess.length,
      memoryUsedMB: memoryUsage.heapUsedMB,
      cacheStats: use_cache ? getFileCache().getStats() : null
    }
  };
}
```
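Based on the header and separator logic above, merged output for two hypothetical files would look like the following sketch (file names and contents are invented for illustration):

```text
=== File Path: src/index.js ===

console.log('hello');

==================================================

=== File Path: src/util.js ===

module.exports = {};

==================================================
```

Each file is introduced by an `=== File Path: ... ===` header and terminated by a 50-character `=` separator; the final string is trimmed and, when `compress` is set, passed through `compressContent`.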
  • Zod input schema validation for the merge_content tool parameters.
```javascript
{
  path: z.string().describe('The target file or directory path.'),
  compress: z.boolean().optional().describe('Whether to compress the output.'),
  use_gitignore: z.boolean().optional().describe('Whether to use .gitignore rules.'),
  ignore_git: z.boolean().optional().describe('Whether to ignore the .git directory.'),
  custom_blacklist: z.array(z.string()).optional().describe('Custom blacklist items.'),
  use_cache: z.boolean().optional().describe('Whether to use file content cache.'),
  use_streams: z.boolean().optional().describe('Whether to use stream processing for large files.'),
  max_files: z.number().optional().describe('Maximum number of files to process.')
}
```
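As an illustration of how this shape behaves, the same fields can be wrapped in `z.object()` and checked standalone; the MCP SDK performs equivalent validation when the shape is registered with `server.tool`. The variable names below are hypothetical:

```javascript
import { z } from 'zod';

// Hypothetical standalone wrapper around (a subset of) the shape above.
const mergeContentArgs = z.object({
  path: z.string().describe('The target file or directory path.'),
  compress: z.boolean().optional(),
  max_files: z.number().optional()
});

// safeParse returns { success: false, error } instead of throwing.
const parsed = mergeContentArgs.safeParse({ path: './src', max_files: '10' });
if (!parsed.success) {
  // Reports that max_files expected a number but received a string.
  console.error(parsed.error.issues);
}
```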
  • MCP SDK tool registration for 'merge_content': imports the handler from ./tools/merge_content, defines the input schema, and wraps execution in an async handler that logs, times the call, and adapts the result.
```javascript
// Register the merge_content tool
if (mergeContentHandler) {
  server.tool(
    'merge_content',
    'Merges content from multiple files into a single output file.',
    {
      path: z.string().describe('The target file or directory path.'),
      compress: z.boolean().optional().describe('Whether to compress the output.'),
      use_gitignore: z.boolean().optional().describe('Whether to use .gitignore rules.'),
      ignore_git: z.boolean().optional().describe('Whether to ignore the .git directory.'),
      custom_blacklist: z.array(z.string()).optional().describe('Custom blacklist items.'),
      use_cache: z.boolean().optional().describe('Whether to use file content cache.'),
      use_streams: z.boolean().optional().describe('Whether to use stream processing for large files.'),
      max_files: z.number().optional().describe('Maximum number of files to process.')
    },
    async (params) => {
      logInfo(`Executing merge_content tool with params: ${JSON.stringify(params)}`);
      try {
        const startTime = Date.now();
        const result = await mergeContentHandler(params);
        const executionTime = Date.now() - startTime;
        logDebug(`merge_content completed in ${executionTime}ms`);
        // The merged content is not modified here; adaptToolResult
        // handles safe JSON serialization of the result.
        if (result && result.merged_content) {
          logDebug(`merge_content result size: ${result.merged_content.length} characters`);
        }
        return adaptToolResult(result);
      } catch (error) {
        logError('Error in merge_content tool:', error);
        throw error;
      }
    }
  );
}
```
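`adaptToolResult` is defined elsewhere in the repository and its exact behavior is not shown here. As a minimal sketch, an adapter conforming to the MCP tool-result shape might look like this (an assumption, not the project's actual implementation):

```javascript
// Minimal sketch (assumption): wrap a handler result in the MCP
// tool-result shape. The real adaptToolResult likely also sanitizes
// the string for JSON serialization, as the comments above suggest.
function adaptToolResult(result) {
  const text = typeof result === 'string' ? result : JSON.stringify(result);
  return { content: [{ type: 'text', text }] };
}
```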
