read_specific_document
Retrieve the full content of a specific project documentation file by providing its exact file name. Supports pagination for longer documents to get complete context.
Instructions
Read full documentation file content when you need complete context.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| fileName | Yes | Name of the documentation file to read. Must match exactly. Example: "coding-standards.md" | |
| page | No | Page number for paginated content. | 1 |
Implementation Reference
- src/index.js:206-222 (registration) — Tool registration with input schema (fileName string, page number) in the ListToolsRequestSchema handler
// Tool registration entry (excerpt — one element of the tools array returned
// by the ListToolsRequestSchema handler; not a standalone statement).
name: 'read_specific_document',
description: 'Read full documentation file content when you need complete context.',
inputSchema: {
  type: 'object',
  properties: {
    // Exact file name of the documentation file to fetch (required).
    fileName: { type: 'string', description: 'Name of the documentation file to read. Must match exactly. Example: "coding-standards.md"' },
    // 1-based page index; only consulted when the formatted document
    // exceeds the 20K-token pagination threshold (see handler below).
    page: { type: 'number', description: 'Page number for paginated content. Default: 1' }
  },
  required: ['fileName']
} },
- src/index.js:440-480 (handler)Main handler for read_specific_document: gets document via docService.getDocument(), formats it, and paginates if > 20K tokens
// Handler for the read_specific_document tool (excerpt — a `case` inside the
// tool-call switch; `args`, `this.docService`, `this.paginationService` and
// `TokenEstimator` are provided by the surrounding file).
case 'read_specific_document':
  const fileName = args?.fileName;
  // NOTE(review): `|| 1` silently maps page 0 (and any falsy value) to 1;
  // `?? 1` would only substitute for null/undefined — confirm intended.
  const page = args?.page || 1;
  if (!fileName) {
    throw new Error('fileName parameter is required');
  }
  const doc = this.docService.getDocument(fileName);
  if (!doc) {
    throw new Error(`Document not found: ${fileName}`);
  }
  const fullContent = await this.formatSingleDocument(doc);
  const fullContentTokens = TokenEstimator.estimateTokens(fullContent);
  // Check if pagination is needed
  if (fullContentTokens <= 20000) {
    return { content: [{ type: 'text', text: fullContent }] };
  }
  // Use pagination for large documents
  const contentChunks = this.paginationService.chunkText(fullContent, 20000);
  const docTotalPages = contentChunks.length;
  // NOTE(review): page bounds are only validated on this large-document path;
  // for documents under the threshold an out-of-range `page` is ignored.
  if (page < 1 || page > docTotalPages) {
    throw new Error(`Invalid page number. Must be between 1 and ${docTotalPages}`);
  }
  const paginationHeader = this.paginationService.formatPaginationHeader(page, docTotalPages, 1, `${fileName} content`);
  const docPageContent = contentChunks[page - 1];
  return { content: [{ type: 'text', text: `${paginationHeader}\n\n${docPageContent}` }] };
- src/index.js:1079-1111 (helper)Helper method that formats a single document with metadata (title, description, keywords, content) and cross-reference suggestions
async formatSingleDocument(doc) { if (!doc) { return 'Document not found'; } let output = `# ${doc.metadata?.title || doc.fileName}\n\n`; if (doc.metadata?.description) { output += `**Description:** ${doc.metadata.description}\n\n`; } if (doc.metadata?.keywords) { output += `**Keywords:** ${Array.isArray(doc.metadata.keywords) ? doc.metadata.keywords.join(', ') : doc.metadata.keywords}\n\n`; } output += `**File:** ${doc.fileName}\n\n`; output += '---\n\n'; output += doc.content; // Add cross-reference suggestions output += `\n\n---\n\n`; output += `## š Related Documentation:\n`; if (doc.metadata?.keywords && doc.metadata.keywords.length > 0) { output += `Consider searching for related topics:\n`; const keywords = Array.isArray(doc.metadata.keywords) ? doc.metadata.keywords : [doc.metadata.keywords]; keywords.slice(0, 3).forEach(keyword => { output += `- \`search_documentation("${keyword}")\`\n`; }); } output += `\nš” **Tip:** If this document is missing information, capture updates with \`create_or_update_rule\`.\n`; return output; } - Helper method on DocumentationService that retrieves a document by fileName from the internal Map
getDocument(fileName) { return this.documents.get(fileName); } - Helper method that chunks large text content for pagination, used when a document exceeds 20K tokens
/**
 * Split `text` into chunks of at most ~`targetTokens` tokens for pagination.
 * Prefers line boundaries; falls back to word boundaries for a single
 * over-long line, and to estimated character offsets when the text has no
 * newlines at all.
 * @param {string} text - Content to split.
 * @param {number} [targetTokens=20000] - Approximate per-chunk token budget.
 * @returns {string[]} Ordered chunks; a single-element array when no split is
 *   needed. NOTE(review): a nullish/empty `text` is returned as `[text]`,
 *   i.e. possibly `[null]` — confirm callers tolerate that.
 */
chunkText(text, targetTokens = 20000) {
  if (!text) {
    return [text];
  }
  // If text is under the token limit, return as-is
  const totalTokens = this.estimateTokens(text);
  if (totalTokens <= targetTokens) {
    return [text];
  }
  // Estimate characters per token for this specific text
  const targetChars = TokenEstimator.estimateCharsForTokens(text, targetTokens);
  const chunks = [];
  // Check if text has line breaks
  if (text.includes('\n')) {
    const lines = text.split('\n');
    let currentChunk = '';
    // Greedily accumulate whole lines until the next line would overflow.
    // NOTE(review): re-estimating tokens on the growing chunk each iteration
    // is O(n^2) in chunk size — acceptable for docs, but worth knowing.
    for (const line of lines) {
      const testChunk = currentChunk + line + '\n';
      const testTokens = this.estimateTokens(testChunk);
      if (testTokens > targetTokens) {
        if (currentChunk) {
          // Flush the accumulated chunk and start fresh with this line.
          chunks.push(currentChunk.trim());
          currentChunk = line + '\n';
        } else {
          // Single line too long, split it by words
          const words = line.split(' ');
          let wordChunk = '';
          for (const word of words) {
            const testWordChunk = wordChunk + word + ' ';
            const wordChunkTokens = this.estimateTokens(testWordChunk);
            if (wordChunkTokens > targetTokens) {
              if (wordChunk) {
                chunks.push(wordChunk.trim());
              }
              wordChunk = word + ' ';
            } else {
              wordChunk = testWordChunk;
            }
          }
          // Carry the final partial word-chunk forward as the new line chunk.
          if (wordChunk) {
            currentChunk = wordChunk + '\n';
          }
        }
      } else {
        currentChunk = testChunk;
      }
    }
    // Flush whatever remains after the last line.
    if (currentChunk) {
      chunks.push(currentChunk.trim());
    }
  } else {
    // No line breaks, split by estimated token boundaries
    let i = 0;
    while (i < text.length) {
      let endPos = Math.min(i + targetChars, text.length);
      // Try to break on word boundaries
      if (endPos < text.length) {
        const nextSpace = text.indexOf(' ', endPos);
        const prevSpace = text.lastIndexOf(' ', endPos);
        // Snap to whichever nearby space is closer; only look forward up to
        // 100 chars so a space-free run still gets cut mid-word.
        if (prevSpace > i && (endPos - prevSpace) < (nextSpace - endPos)) {
          endPos = prevSpace;
        } else if (nextSpace !== -1 && (nextSpace - endPos) < 100) {
          endPos = nextSpace;
        }
      }
      const chunk = text.slice(i, endPos);
      chunks.push(chunk);
      i = endPos;
      // Skip whitespace at the beginning of next chunk
      while (i < text.length && text[i] === ' ') {
        i++;
      }
    }
  }
  return chunks;
}