Skip to main content
Glama
N3uraX

Tafa MCP Server

by N3uraX
searchOperations.js • 8.2 kB
import fs from "fs-extra";
import path from "path";
import glob from "fast-glob";
// BUG FIX: findDuplicates previously called require('crypto') inside this ES
// module, where `require` is undefined; import the hash factory properly.
import { createHash } from "crypto";

/**
 * File-search utilities (filename glob search, content grep, duplicate
 * detection) constrained by a security manager that validates paths and
 * checks read permissions before any filesystem access.
 */
export class SearchOperations {
  /**
   * @param {object} securityManager - expected to provide
   *   `validatePath(p)` (throws for disallowed paths, returns a normalized
   *   path) and async `checkPermissions(p, mode)`.
   */
  constructor(securityManager) {
    this.security = securityManager;
  }

  /**
   * Find files whose names match a glob pattern under a directory.
   *
   * @param {string} directory - base directory to search.
   * @param {string} pattern - file-name glob pattern (e.g. "*.js").
   * @param {boolean} [recursive=true] - descend into subdirectories.
   * @returns {Promise<{content: Array<{type: string, text: string}>}>}
   *   MCP-style text payload; errors are reported in the text, not thrown.
   */
  async searchFiles(directory, pattern, recursive = true) {
    try {
      const validDir = this.security.validatePath(directory);
      await this.security.checkPermissions(validDir, 'read');

      const globPattern = recursive
        ? `${validDir}/**/${pattern}`
        : `${validDir}/${pattern}`;

      const files = await glob(globPattern, {
        onlyFiles: true,
        ignore: ['**/node_modules/**', '**/.git/**']
      });

      // Re-validate every hit: the glob may traverse symlinks or otherwise
      // escape the allowed directories.
      const validFiles = files.filter(file => {
        try {
          this.security.validatePath(file);
          return true;
        } catch {
          return false;
        }
      });

      const results = [];
      for (const file of validFiles) {
        try {
          const stats = await fs.stat(file);
          results.push({
            path: file,
            name: path.basename(file),
            size: stats.size,
            sizeHuman: this.formatFileSize(stats.size),
            modified: stats.mtime,
            relativePath: path.relative(validDir, file)
          });
        } catch {
          // Skip files we can't stat (removed mid-scan, permission denied).
        }
      }

      const output = this.formatSearchResults(results, pattern, validDir);
      return { content: [{ type: "text", text: output }] };
    } catch (error) {
      return { content: [{ type: "text", text: `❌ Error searching files: ${error.message}` }] };
    }
  }

  /**
   * Search file contents for a regular-expression term.
   *
   * @param {string} directory - base directory to search.
   * @param {string} searchTerm - compiled as a case-insensitive RegExp.
   *   NOTE(review): an untrusted searchTerm can throw on bad syntax or cause
   *   catastrophic backtracking (ReDoS); consider escaping if callers pass
   *   raw user input — confirm intended contract.
   * @param {string} [filePattern="*"] - glob restricting which files to read.
   * @param {boolean} [recursive=true] - descend into subdirectories.
   * @returns {Promise<{content: Array<{type: string, text: string}>}>}
   */
  async searchContent(directory, searchTerm, filePattern = "*", recursive = true) {
    try {
      const validDir = this.security.validatePath(directory);
      await this.security.checkPermissions(validDir, 'read');

      const globPattern = recursive
        ? `${validDir}/**/${filePattern}`
        : `${validDir}/${filePattern}`;

      const files = await glob(globPattern, {
        onlyFiles: true,
        ignore: ['**/node_modules/**', '**/.git/**', '**/*.{jpg,jpeg,png,gif,pdf,exe,zip,tar,gz}']
      });

      const results = [];
      const searchRegex = new RegExp(searchTerm, 'gi');

      for (const file of files) {
        try {
          // Validate file is within allowed directories.
          this.security.validatePath(file);

          // Skip files larger than 10 MB to avoid huge reads.
          const stats = await fs.stat(file);
          if (stats.size > 10 * 1024 * 1024) {
            continue;
          }

          const content = await fs.readFile(file, 'utf8');
          const lines = content.split('\n');
          const matches = [];

          lines.forEach((line, index) => {
            // BUG FIX: the original called searchRegex.test(line) with a /g
            // regex; a global regex's stateful lastIndex carries over between
            // calls, silently skipping matches on later lines. String#match
            // with a /g regex ignores lastIndex, so one call both detects and
            // collects the matches.
            const lineMatches = line.match(searchRegex);
            if (lineMatches) {
              matches.push({
                lineNumber: index + 1,
                line: line.trim(),
                match: lineMatches
              });
            }
          });

          if (matches.length > 0) {
            results.push({
              path: file,
              name: path.basename(file),
              relativePath: path.relative(validDir, file),
              matches,
              matchCount: matches.length
            });
          }
        } catch {
          // Skip files we can't read or that aren't valid UTF-8 text.
        }
      }

      const output = this.formatContentSearchResults(results, searchTerm, validDir);
      return { content: [{ type: "text", text: output }] };
    } catch (error) {
      return { content: [{ type: "text", text: `❌ Error searching content: ${error.message}` }] };
    }
  }

  /**
   * Group byte-identical files under a directory by content hash.
   *
   * @param {string} directory - base directory to scan recursively.
   * @param {number} [minSize=0] - ignore files smaller than this many bytes.
   * @returns {Promise<{content: Array<{type: string, text: string}>}>}
   */
  async findDuplicates(directory, minSize = 0) {
    try {
      const validDir = this.security.validatePath(directory);
      await this.security.checkPermissions(validDir, 'read');

      const files = await glob(`${validDir}/**/*`, {
        onlyFiles: true,
        ignore: ['**/node_modules/**', '**/.git/**']
      });

      const firstSeen = new Map();   // hash -> first file path with that content
      const groupByHash = new Map(); // hash -> duplicate group (O(1) vs. the
                                     // original's linear duplicates.find scans)
      const duplicates = [];

      for (const file of files) {
        try {
          const stats = await fs.stat(file);
          if (stats.size < minSize) continue;

          const content = await fs.readFile(file);
          // MD5 is acceptable here: it only groups equal content, it is not a
          // security boundary.
          const hash = createHash('md5').update(content).digest('hex');

          if (firstSeen.has(hash)) {
            let group = groupByHash.get(hash);
            if (!group) {
              group = {
                hash,
                files: [firstSeen.get(hash), file],
                size: stats.size,
                sizeHuman: this.formatFileSize(stats.size)
              };
              groupByHash.set(hash, group);
              duplicates.push(group);
            } else {
              group.files.push(file);
            }
          } else {
            firstSeen.set(hash, file);
          }
        } catch {
          // Skip files we can't read.
        }
      }

      const output = this.formatDuplicateResults(duplicates, validDir);
      return { content: [{ type: "text", text: output }] };
    } catch (error) {
      return { content: [{ type: "text", text: `❌ Error finding duplicates: ${error.message}` }] };
    }
  }

  /**
   * Render searchFiles results as a human-readable report.
   * @param {Array<object>} results - entries built by searchFiles.
   * @param {string} pattern - the glob pattern that was searched.
   * @param {string} baseDir - validated base directory (for the header).
   * @returns {string}
   */
  formatSearchResults(results, pattern, baseDir) {
    let output = `🔍 Search Results for "${pattern}" in ${baseDir}\n`;
    output += `📊 Found ${results.length} matches\n\n`;

    if (results.length === 0) {
      output += "No files found matching the pattern.\n";
      return output;
    }

    results.forEach((result, index) => {
      output += `${index + 1}. 📄 ${result.name}\n`;
      output += `   📁 ${result.relativePath}\n`;
      output += `   📊 ${result.sizeHuman} | Modified: ${result.modified.toLocaleString()}\n\n`;
    });

    return output;
  }

  /**
   * Render searchContent results; shows at most 3 matching lines per file.
   * @param {Array<object>} results - entries built by searchContent.
   * @param {string} searchTerm - the term that was searched.
   * @param {string} baseDir - validated base directory (for the header).
   * @returns {string}
   */
  formatContentSearchResults(results, searchTerm, baseDir) {
    let output = `🔍 Content Search Results for "${searchTerm}" in ${baseDir}\n`;
    const totalMatches = results.reduce((sum, result) => sum + result.matchCount, 0);
    output += `📊 Found ${totalMatches} matches in ${results.length} files\n\n`;

    if (results.length === 0) {
      output += "No content matches found.\n";
      return output;
    }

    results.forEach((result, index) => {
      output += `${index + 1}. 📄 ${result.name} (${result.matchCount} matches)\n`;
      output += `   📁 ${result.relativePath}\n`;

      // Show only the first 3 matches per file to keep the report short.
      const displayMatches = result.matches.slice(0, 3);
      displayMatches.forEach(match => {
        output += `   📝 Line ${match.lineNumber}: ${match.line}\n`;
      });

      if (result.matches.length > 3) {
        output += `   ... and ${result.matches.length - 3} more matches\n`;
      }
      output += '\n';
    });

    return output;
  }

  /**
   * Render findDuplicates results, one numbered group per identical content.
   * @param {Array<object>} duplicates - groups built by findDuplicates.
   * @param {string} baseDir - validated base directory (for relative paths).
   * @returns {string}
   */
  formatDuplicateResults(duplicates, baseDir) {
    let output = `🔍 Duplicate Files in ${baseDir}\n`;
    output += `📊 Found ${duplicates.length} duplicate groups\n\n`;

    if (duplicates.length === 0) {
      output += "No duplicate files found.\n";
      return output;
    }

    duplicates.forEach((group, index) => {
      output += `${index + 1}. 📄 Duplicate Group (${group.sizeHuman} each)\n`;
      output += `   🔗 Hash: ${group.hash}\n`;
      group.files.forEach((file, fileIndex) => {
        const relativePath = path.relative(baseDir, file);
        output += `      ${fileIndex + 1}. ${relativePath}\n`;
      });
      output += '\n';
    });

    return output;
  }

  /**
   * Format a byte count as a human-readable size ("1.5 KB", "3 MB", ...).
   * @param {number} bytes - non-negative byte count.
   * @returns {string}
   */
  formatFileSize(bytes) {
    const sizes = ['Bytes', 'KB', 'MB', 'GB'];
    if (bytes === 0) return '0 Bytes';
    // Math.floor already yields a number; the original's parseInt wrapper
    // was a redundant number->string->number round trip.
    const i = Math.floor(Math.log(bytes) / Math.log(1024));
    return Math.round((bytes / Math.pow(1024, i)) * 100) / 100 + ' ' + sizes[i];
  }
}

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/N3uraX/tafa-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.