Skip to main content
Glama

aidex_init

Initialize indexing for a project by scanning source files to build a searchable index of code identifiers, methods, types, and signatures.

Instructions

Initialize AiDex indexing for a project. Scans all source files and builds a searchable index of identifiers, methods, types, and signatures.

Input Schema

Table / JSON Schema

| Name    | Required | Description                                                | Default |
| ------- | -------- | ---------------------------------------------------------- | ------- |
| path    | Yes      | Absolute path to the project directory to index            | —       |
| name    | No       | Optional project name (defaults to directory name)         | —       |
| exclude | No       | Additional glob patterns to exclude (e.g., ["**/test/**"]) | —       |

Implementation Reference

  • Tool schema definition for aidex_init - defines the input schema with path (required), name (optional), and exclude (optional) parameters, along with the tool description.
        // MCP tool descriptor for `aidex_init`: tool name, human-readable
        // description, and the JSON-schema contract for its input arguments.
        name: `${TOOL_PREFIX}init`,
        description: `Initialize ${PRODUCT_NAME} indexing for a project. Scans all source files and builds a searchable index of identifiers, methods, types, and signatures.`,
        inputSchema: {
            type: 'object',
            properties: {
                // Required: root directory of the project to index.
                path: {
                    type: 'string',
                    description: 'Absolute path to the project directory to index',
                },
                // Optional: display name stored with the index (falls back to
                // the directory's basename — see init()).
                name: {
                    type: 'string',
                    description: 'Optional project name (defaults to directory name)',
                },
                // Optional: extra glob patterns merged into the default
                // exclude list (and .gitignore patterns) during scanning.
                exclude: {
                    type: 'array',
                    items: { type: 'string' },
                    description: 'Additional glob patterns to exclude (e.g., ["**/test/**"])',
                },
            },
            required: ['path'],
        },
    },
  • Tool routing registration - maps the aidex_init tool name to the handleInit handler function in the switch statement.
    // Route `aidex_init` tool invocations to their handler.
    case `${TOOL_PREFIX}init`:
        return await handleInit(args);
  • handleInit function - validates input parameters, calls the core init function, and formats the response with statistics including files indexed, items found, methods found, types found, and duration.
    /**
     * MCP handler for the `aidex_init` tool.
     *
     * Validates the raw tool arguments, delegates to the core init() function,
     * and formats the result (index stats or errors) as MCP text content.
     *
     * @param args - Raw, untrusted tool arguments from the MCP client.
     * @returns MCP content payload with a human-readable summary or error.
     */
    async function handleInit(args: Record<string, unknown>): Promise<{ content: Array<{ type: string; text: string }> }> {
        // Runtime type check instead of a bare `as string` cast: a truthy
        // non-string value (e.g. a number) previously slipped past `!path`
        // and was forwarded to init() as if it were a string.
        const path = typeof args.path === 'string' ? args.path : '';
        if (!path) {
            return {
                content: [{ type: 'text', text: 'Error: path parameter is required' }],
            };
        }

        const result = await init({
            path,
            // Validate optional arguments the same way; malformed values are
            // treated as absent rather than cast and passed through.
            name: typeof args.name === 'string' ? args.name : undefined,
            exclude: Array.isArray(args.exclude) ? (args.exclude as string[]) : undefined,
        });

        if (result.success) {
            let message = `✓ ${PRODUCT_NAME} initialized for project\n\n`;
            message += `Database: ${result.indexPath}/index.db\n`;
            message += `Files indexed: ${result.filesIndexed}`;
            if (result.filesSkipped > 0) {
                message += ` (${result.filesSkipped} unchanged, skipped)`;
            }
            message += `\n`;
            if (result.filesRemoved > 0) {
                message += `Files removed: ${result.filesRemoved} (now excluded)\n`;
            }
            message += `Items found: ${result.itemsFound}\n`;
            message += `Methods found: ${result.methodsFound}\n`;
            message += `Types found: ${result.typesFound}\n`;
            message += `Duration: ${result.durationMs}ms`;

            // Per-file indexing errors are non-fatal; surface at most 10.
            if (result.errors.length > 0) {
                message += `\n\nWarnings (${result.errors.length}):\n`;
                message += result.errors.slice(0, 10).map(e => `  - ${e}`).join('\n');
                if (result.errors.length > 10) {
                    message += `\n  ... and ${result.errors.length - 10} more`;
                }
            }

            return {
                content: [{ type: 'text', text: message }],
            };
        } else {
            return {
                content: [{ type: 'text', text: `Error: ${result.errors.join(', ')}` }],
            };
        }
    }
  • Core init function implementation - creates the .aidex index directory, opens/creates database, scans source files with glob patterns, indexes each file (extracting methods, types, identifiers), manages incremental indexing, cleans up excluded files, scans project structure, and returns comprehensive results.
    export async function init(params: InitParams): Promise<InitResult> {
        const startTime = Date.now();
        const errors: string[] = [];
    
        // Validate project path
        if (!existsSync(params.path)) {
            return {
                success: false,
                indexPath: '',
                filesIndexed: 0,
                filesSkipped: 0,
                filesRemoved: 0,
                itemsFound: 0,
                methodsFound: 0,
                typesFound: 0,
                durationMs: Date.now() - startTime,
                errors: [`Project path does not exist: ${params.path}`],
            };
        }
    
        const stat = statSync(params.path);
        if (!stat.isDirectory()) {
            return {
                success: false,
                indexPath: '',
                filesIndexed: 0,
                filesSkipped: 0,
                filesRemoved: 0,
                itemsFound: 0,
                methodsFound: 0,
                typesFound: 0,
                durationMs: Date.now() - startTime,
                errors: [`Path is not a directory: ${params.path}`],
            };
        }
    
        // Create index directory
        const indexDir = join(params.path, INDEX_DIR);
        if (!existsSync(indexDir)) {
            mkdirSync(indexDir, { recursive: true });
        }
    
        const dbPath = join(indexDir, 'index.db');
        const projectName = params.name ?? basename(params.path);
    
        // Determine if incremental (default) or fresh re-index
        const dbExists = existsSync(dbPath);
        const incremental = dbExists && !params.fresh;
    
        // Create database (incremental keeps existing data)
        const db = createDatabase(dbPath, projectName, params.path, incremental);
        const queries = createQueries(db);
    
        // Build glob pattern for supported files
        const extensions = getSupportedExtensions();
        const patterns = extensions.map(ext => `**/*${ext}`);
    
        // Merge exclude patterns (including .gitignore)
        const gitignorePatterns = readGitignore(params.path);
        const exclude = [...DEFAULT_EXCLUDE, ...gitignorePatterns, ...(params.exclude ?? [])];
    
        // Find all source files
        let files: string[] = [];
        for (const pattern of patterns) {
            const found = await glob(pattern, {
                cwd: params.path,
                ignore: exclude,
                nodir: true,
                absolute: false,
            });
            files.push(...found);
        }
    
        // Remove duplicates, normalize to forward slashes, and sort
        files = [...new Set(files)].map(f => f.replace(/\\/g, '/')).sort();
    
        // Index each file
        let filesIndexed = 0;
        let filesSkipped = 0;
        let totalItems = 0;
        let totalMethods = 0;
        let totalTypes = 0;
    
        // Use transaction for bulk insert
        db.transaction(() => {
            for (const filePath of files) {
                try {
                    const result = indexFile(params.path, filePath, db, queries, incremental);
                    if (result.skipped) {
                        filesSkipped++;
                    } else if (result.success) {
                        filesIndexed++;
                        totalItems += result.items;
                        totalMethods += result.methods;
                        totalTypes += result.types;
                    } else if (result.error) {
                        errors.push(`${filePath}: ${result.error}`);
                    }
                } catch (err) {
                    errors.push(`${filePath}: ${err instanceof Error ? err.message : String(err)}`);
                }
            }
        });
    
        // Cleanup unused items
        queries.deleteUnusedItems();
    
        // --------------------------------------------------------
        // Cleanup: Remove files that are now excluded
        // (e.g., build/ was indexed before exclude pattern was added)
        // --------------------------------------------------------
        let filesRemoved = 0;
        const existingFiles = queries.getAllFiles();
    
        db.transaction(() => {
            for (const file of existingFiles) {
                // Check if this file path matches any exclude pattern
                const shouldExclude = exclude.some(pattern =>
                    minimatch(file.path, pattern, { dot: true })
                );
    
                if (shouldExclude) {
                    // Remove from index
                    queries.clearFileData(file.id);
                    queries.deleteFile(file.id);
                    filesRemoved++;
                }
            }
        });
    
        if (filesRemoved > 0) {
            // Cleanup items that are now orphaned
            queries.deleteUnusedItems();
        }
    
        // --------------------------------------------------------
        // Scan project structure (all files, not just code)
        // --------------------------------------------------------
        const indexedFilesSet = new Set(files);  // Code files we indexed
    
        // Find ALL files in project
        const allFiles = await glob('**/*', {
            cwd: params.path,
            ignore: exclude,
            nodir: true,
            absolute: false,
        });
    
        // Normalize paths and collect directories
        const directories = new Set<string>();
        const normalizedAllFiles = allFiles.map(f => f.replace(/\\/g, '/'));
    
        for (const filePath of normalizedAllFiles) {
            // Extract all parent directories
            const parts = filePath.split('/');
            for (let i = 1; i < parts.length; i++) {
                directories.add(parts.slice(0, i).join('/'));
            }
        }
    
        // Insert directories
        db.transaction(() => {
            for (const dir of directories) {
                queries.insertProjectFile(dir, 'dir', null, false);
            }
    
            // Insert all files with type detection
            for (const filePath of normalizedAllFiles) {
                const ext = extname(filePath).toLowerCase() || null;
                const fileType = detectFileType(filePath);
                const isIndexed = indexedFilesSet.has(filePath);
                queries.insertProjectFile(filePath, fileType, ext, isIndexed);
            }
        });
    
        // Reset session tracking after full re-index
        const now = Date.now().toString();
        db.setMetadata('last_session_start', now);
        db.setMetadata('last_session_end', now);
        db.setMetadata('current_session_start', now);
    
        db.close();
    
        return {
            success: true,
            indexPath: indexDir,
            filesIndexed,
            filesSkipped,
            filesRemoved,
            itemsFound: totalItems,
            methodsFound: totalMethods,
            typesFound: totalTypes,
            durationMs: Date.now() - startTime,
            errors,
        };
    }
  • indexFile helper function - indexes a single file by reading content, calculating hash, checking for incremental skip, extracting data (lines, items, methods, types), and inserting into the database (the caller wraps these inserts in a transaction).
    /**
     * Index a single source file.
     *
     * Reads the file, hashes its content, skips unchanged files in
     * incremental mode, extracts lines/items/methods/types, and inserts the
     * results via `queries`. Callers are expected to wrap calls in a database
     * transaction (see init()).
     *
     * @param projectPath  - Absolute project root.
     * @param relativePath - File path relative to the project root.
     * @param db           - Open AiDex database handle.
     * @param queries      - Prepared query helpers for `db`.
     * @param incremental  - When true, unchanged files (same hash) are skipped.
     * @returns Per-file counts, or an error description on failure.
     */
    function indexFile(
        projectPath: string,
        relativePath: string,
        db: AiDexDatabase,
        queries: Queries,
        incremental: boolean = false
    ): IndexFileResult {
        const absolutePath = join(projectPath, relativePath);

        // Read file content
        let content: string;
        try {
            content = readFileSync(absolutePath, 'utf-8');
        } catch (err) {
            return {
                success: false,
                items: 0,
                methods: 0,
                types: 0,
                error: `Cannot read file: ${err instanceof Error ? err.message : String(err)}`,
            };
        }

        // Calculate hash
        const hash = shortHash(content);

        // In incremental mode, skip unchanged files
        if (incremental) {
            const existingFile = queries.getFileByPath(relativePath);
            if (existingFile && existingFile.hash === hash) {
                return {
                    success: true,
                    skipped: true,
                    items: 0,
                    methods: 0,
                    types: 0,
                };
            }
            // File changed - clear old data before re-indexing
            if (existingFile) {
                queries.clearFileData(existingFile.id);
                queries.deleteFile(existingFile.id);
            }
        }

        // Extract data from file
        const extraction = extract(content, relativePath);
        if (!extraction) {
            return {
                success: false,
                items: 0,
                methods: 0,
                types: 0,
                error: 'Unsupported file type or parse error',
            };
        }

        // Insert file record
        const fileId = queries.insertFile(relativePath, hash);

        // Split content into lines for hashing
        const contentLines = content.split('\n');
        const now = Date.now();

        // Insert lines AND build the lineNumber -> lineId mapping in a single
        // pass (previously two loops re-walked extraction.lines to rebuild
        // the same mapping). If a line number appears twice, the last
        // assigned id wins — identical to the previous behavior.
        const lineNumberToId = new Map<number, number>();
        let lineId = 1;
        for (const line of extraction.lines) {
            const lineContent = contentLines[line.lineNumber - 1] ?? '';
            const lineHash = shortHash(lineContent);
            queries.insertLine(fileId, lineId, line.lineNumber, line.lineType, lineHash, now);
            lineNumberToId.set(line.lineNumber, lineId);
            lineId++;
        }

        // Insert items and occurrences
        const itemsInserted = new Set<string>();
        for (const item of extraction.items) {
            const lineIdForItem = lineNumberToId.get(item.lineNumber);
            if (lineIdForItem === undefined) {
                // Line wasn't recorded during extraction, add it now
                const newLineId = lineId++;
                const lineContent = contentLines[item.lineNumber - 1] ?? '';
                const lineHash = shortHash(lineContent);
                queries.insertLine(fileId, newLineId, item.lineNumber, item.lineType, lineHash, now);
                lineNumberToId.set(item.lineNumber, newLineId);
            }

            const itemId = queries.getOrCreateItem(item.term);
            const finalLineId = lineNumberToId.get(item.lineNumber)!;
            queries.insertOccurrence(itemId, fileId, finalLineId);
            itemsInserted.add(item.term);
        }

        // Insert methods
        for (const method of extraction.methods) {
            queries.insertMethod(
                fileId,
                method.name,
                method.prototype,
                method.lineNumber,
                method.visibility,
                method.isStatic,
                method.isAsync
            );
        }

        // Insert types
        for (const type of extraction.types) {
            queries.insertType(fileId, type.name, type.kind, type.lineNumber);
        }

        // Insert signature (header comments)
        if (extraction.headerComments.length > 0) {
            queries.insertSignature(fileId, extraction.headerComments.join('\n'));
        }

        return {
            success: true,
            items: itemsInserted.size,  // unique terms, not raw occurrences
            methods: extraction.methods.length,
            types: extraction.types.length,
        };
    }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/CSCSoftware/AiDex'

If you have feedback or need assistance with the MCP directory API, please join our Discord server