read_context
Read and analyze code files with advanced filtering and chunking, automatically ignoring common artifact directories and files for efficient code examination.
Instructions
Read and analyze code files with advanced filtering and chunking. The server automatically ignores common artifact directories and files:
Version Control: .git/
Python: .venv/, `__pycache__/`, *.pyc, etc.
JavaScript/Node.js: node_modules/, bower_components/, .next/, dist/, etc.
IDE/Editor: .idea/, .vscode/, .env, etc.
For large files or directories, use get_chunk_count first to determine total chunks, then request specific chunks using chunkNumber parameter.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| path | Yes | Path to file or directory to read | |
| maxSize | No | Maximum file size in bytes. Files larger than this will be chunked. | 1048576 |
| encoding | No | File encoding (e.g., utf8, ascii, latin1) | utf8 |
| recursive | No | Whether to read directories recursively (includes subdirectories) | true |
| fileTypes | No | File extension(s) to include WITHOUT dots (e.g. ["ts", "js", "py"] or just "ts"). Empty/undefined means all files. | [] |
| chunkNumber | No | Which chunk to return (0-based). Use with get_chunk_count to handle large files/directories. | 0 |
Implementation Reference
- src/index.ts:740-762 (handler)Handler method that processes read_context tool requests, reads file/directory content using readContent, chunks it, and returns JSON response.

  ```typescript
  /**
   * Tool handler for `read_context`.
   * Reads a file or directory via readContent, slices the aggregated content
   * into fixed-size chunks, and returns one chunk as a JSON tool response
   * with pagination hints (hasMore / nextChunk).
   */
  private async handleReadFile(args: any) {
    // Defaults here mirror the tool's declared input schema (see src/index.ts:268-306).
    const { path: filePath, encoding = 'utf8', maxSize, recursive = true, fileTypes, chunkNumber = 0 } = args;
    try {
      const filesInfo = await this.readContent(filePath, encoding as BufferEncoding, maxSize, recursive, fileTypes);
      // Byte/char offset into the aggregated content = chunk index * configured chunk size.
      const { content, hasMore } = this.getContentChunk(filesInfo, chunkNumber * this.config.chunkSize);
      // nextChunk is null on the last chunk so callers know when to stop paging.
      return this.createJsonResponse({ content, hasMore, nextChunk: hasMore ? chunkNumber + 1 : null });
    } catch (error) {
      // Normalizes filesystem errors into the server's file-operation error type.
      throw this.handleFileOperationError(error, 'read file', filePath);
    }
  }
  ```
- src/index.ts:268-306 (schema)Input schema and description for the read_context tool defined in server capabilities.

  ```typescript
  // Tool declaration registered in the server capabilities.
  // `path` is the only required property; every other property's default
  // matches the destructuring defaults in handleReadFile.
  read_context: {
    description: 'WARNING: Run get_chunk_count first to determine total chunks, then request specific chunks using chunkNumber parameter.\nRead and analyze code files with advanced filtering and chunking. The server automatically ignores common artifact directories and files:\n- Version Control: .git/\n- Python: .venv/, __pycache__/, *.pyc, etc.\n- JavaScript/Node.js: node_modules/, bower_components/, .next/, dist/, etc.\n- IDE/Editor: .idea/, .vscode/, .env, etc.\n\n**WARNING** use get_chunk_count first to determine total chunks, then request specific chunks using chunkNumber parameter.',
    inputSchema: {
      type: 'object',
      properties: {
        path: { type: 'string', description: 'Path to file or directory to read' },
        // NOTE(review): description says oversized files "will be chunked", but
        // readContent throws FILE_TOO_LARGE for single files and skips them in
        // directory scans — confirm which behavior is intended.
        maxSize: { type: 'number', description: 'Maximum file size in bytes. Files larger than this will be chunked.', default: 1048576 },
        encoding: { type: 'string', description: 'File encoding (e.g., utf8, ascii, latin1)', default: 'utf8' },
        recursive: { type: 'boolean', description: 'Whether to read directories recursively (includes subdirectories)', default: true },
        // Accepts either a single extension string or an array of extensions
        // (no leading dots); readContent normalizes both forms.
        fileTypes: { type: ['array', 'string'], items: { type: 'string' }, description: 'File extension(s) to include WITHOUT dots (e.g. ["ts", "js", "py"] or just "ts"). Empty/undefined means all files.', default: [] },
        chunkNumber: { type: 'number', description: 'Which chunk to return (0-based). Use with get_chunk_count to handle large files/directories.', default: 0 }
      },
      required: ['path']
    }
  },
  ```
- src/index.ts:1604-1626 (registration)Registration of read_context tool in the CallToolRequestSchema request handler switch statement, mapping to handleReadFile.

  ```typescript
  // CallToolRequestSchema dispatch: routes each tool name to its handler method.
  // `read_context` maps to handleReadFile.
  switch (request.params.name) {
    case 'list_context_files':
      return await this.handleListFiles(request.params.arguments);
    case 'read_context':
      return await this.handleReadFile(request.params.arguments);
    case 'search_context':
      return await this.handleSearchFiles(request.params.arguments);
    case 'get_chunk_count':
      return await this.handleGetChunkCount(request.params.arguments);
    case 'set_profile':
      return await this.handleSetProfile(request.params.arguments);
    case 'get_profile_context':
      return await this.handleGetProfileContext(request.params.arguments);
    case 'generate_outline':
      return await this.handleGenerateOutline(request.params.arguments);
    case 'getFiles':
      return await this.handleGetFiles(request.params.arguments);
    default:
      // Unknown tool names surface to the client as MCP MethodNotFound errors.
      throw new McpError(
        ErrorCode.MethodNotFound,
        `Unknown tool: ${request.params.name}`
      );
  }
  ```
- src/index.ts:789-903 (helper)Core helper method implementing the file reading logic for read_context, handling single files and directories with glob, filtering, caching, and ignoring patterns.

  ```typescript
  /**
   * Reads a single file or an entire directory tree into a FilesInfo map
   * (absolute/glob path -> { path, content, hash, size, lastModified }).
   *
   * Behavior visible in this excerpt:
   * - fileTypes may be one extension or an array; leading dots are stripped
   *   and matching is case-insensitive on the path suffix.
   * - Single file: throws FILE_TOO_LARGE when maxSize is exceeded; silently
   *   returns an empty map when the extension filter does not match.
   * - Directory: globs with DEFAULT_IGNORE_PATTERNS; oversized files are
   *   skipped, and per-file read errors are logged but not rethrown.
   * - Contents are cached per path and invalidated by mtimeMs; an MD5 hash
   *   of each file's content is stored alongside it.
   */
  private async readContent(
    filePath: string,
    encoding: BufferEncoding = 'utf8',
    maxSize?: number,
    recursive: boolean = true,
    fileTypes?: string[] | string
  ): Promise<FilesInfo> {
    const filesInfo: FilesInfo = {};
    const absolutePath = path.resolve(filePath);

    // Normalize fileTypes to a lowercase, dot-less string array
    // (undefined means "include all files").
    const cleanFileTypes = Array.isArray(fileTypes)
      ? fileTypes.map(ext => ext.toLowerCase().replace(/^\./, ''))
      : fileTypes
        ? [fileTypes.toLowerCase().replace(/^\./, '')]
        : undefined;

    await this.loggingService.debug('Reading content with file type filtering', {
      cleanFileTypes,
      absolutePath,
      operation: 'read_content'
    });

    // Handle single file
    // NOTE(review): fs.stat is called twice on this branch (here and below);
    // the first result is discarded — a single stat could serve both checks.
    if ((await fs.stat(absolutePath)).isFile()) {
      // Extension filter miss yields an empty result rather than an error.
      if (cleanFileTypes && !cleanFileTypes.some(ext => absolutePath.toLowerCase().endsWith(`.${ext}`))) {
        return filesInfo;
      }
      const stat = await fs.stat(absolutePath);
      if (maxSize && stat.size > maxSize) {
        throw new FileOperationError(
          FileErrorCode.FILE_TOO_LARGE,
          `File ${absolutePath} exceeds maximum size limit of ${maxSize} bytes`,
          absolutePath
        );
      }
      // Check cache first — reuse cached content when mtime is unchanged.
      const cached = this.fileContentCache.get(absolutePath);
      let content: string;
      if (cached && cached.lastModified === stat.mtimeMs) {
        content = cached.content;
      } else {
        content = await fs.readFile(absolutePath, encoding);
        this.fileContentCache.set(absolutePath, {
          content,
          lastModified: stat.mtimeMs
        });
      }
      // MD5 used as a content fingerprint (not for security).
      const hash = createHash('md5').update(content).digest('hex');
      filesInfo[absolutePath] = {
        path: absolutePath,
        content,
        hash,
        size: stat.size,
        lastModified: stat.mtimeMs
      };
      return filesInfo;
    }

    // Handle directory: use POSIX join for glob
    const pattern = recursive ? '**/*' : '*';
    // Convert platform separators to POSIX so glob works on Windows paths too.
    const globPattern = path.posix.join(absolutePath.split(path.sep).join(path.posix.sep), pattern);
    // NOTE(review): dot:false means dotfiles/dot-directories are skipped even
    // when not listed in DEFAULT_IGNORE_PATTERNS — confirm this is intended.
    const files = await this.globPromise(globPattern, {
      ignore: DEFAULT_IGNORE_PATTERNS,
      nodir: true,
      dot: false,
      cache: true,
      follow: false
    });

    // Read all matched files concurrently; failures affect only that file.
    await Promise.all(files.map(async (file) => {
      if (cleanFileTypes && !cleanFileTypes.some(ext => file.toLowerCase().endsWith(`.${ext}`))) {
        return;
      }
      try {
        const stat = await fs.stat(file);
        // Oversized files are silently skipped in directory mode
        // (contrast: single-file mode throws FILE_TOO_LARGE).
        if (maxSize && stat.size > maxSize) {
          return;
        }
        // Check cache first — same mtime-based invalidation as the single-file path.
        const cached = this.fileContentCache.get(file);
        let content: string;
        if (cached && cached.lastModified === stat.mtimeMs) {
          content = cached.content;
        } else {
          content = await fs.readFile(file, encoding);
          this.fileContentCache.set(file, {
            content,
            lastModified: stat.mtimeMs
          });
        }
        const hash = createHash('md5').update(content).digest('hex');
        filesInfo[file] = {
          path: file,
          content,
          hash,
          size: stat.size,
          lastModified: stat.mtimeMs
        };
      } catch (error) {
        // Best-effort: log and continue with the remaining files.
        // NOTE(review): operation tag is 'get_files_info' though this is
        // readContent — possibly copied from a sibling helper; confirm.
        await this.loggingService.error('Error reading file for info collection', error as Error, {
          filePath: file,
          operation: 'get_files_info'
        });
      }
    }));
    return filesInfo;
  }
  ```