#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListResourcesRequestSchema,
ListToolsRequestSchema,
ReadResourceRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import Database from "better-sqlite3";
import { join } from "path";
import { homedir } from "os";
import { mkdirSync, existsSync } from "fs";
// Database setup
const dbDir = join(homedir(), ".context-persistence-mcp");
if (!existsSync(dbDir)) {
mkdirSync(dbDir, { recursive: true });
}
const dbPath = join(dbDir, "context.db");
const db = new Database(dbPath);
// Enable SQLite optimizations
db.pragma('journal_mode = WAL'); // Write-Ahead Logging for better concurrency
db.pragma('synchronous = NORMAL'); // Faster writes with good safety
db.pragma('cache_size = -64000'); // 64MB cache
db.pragma('temp_store = MEMORY'); // Use memory for temp tables
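// Assumption: the ON DELETE CASCADE clauses below are meant to be enforced.
// SQLite leaves foreign key enforcement off by default, so enable it here.
db.pragma('foreign_keys = ON');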
// Initialize database schema
db.exec(`
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
workspace_path TEXT NOT NULL,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS code_entities (
id INTEGER PRIMARY KEY AUTOINCREMENT,
message_id INTEGER NOT NULL,
entity_type TEXT NOT NULL,
entity_name TEXT NOT NULL,
confidence REAL DEFAULT 1.0,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS workspace_metadata (
workspace_path TEXT PRIMARY KEY,
last_active DATETIME DEFAULT CURRENT_TIMESTAMP,
message_count INTEGER DEFAULT 0
);
-- Workspace aliases for smart search (e.g., "etl-ca" -> ["etl-ca", "etl", "ca"])
CREATE TABLE IF NOT EXISTS workspace_aliases (
id INTEGER PRIMARY KEY AUTOINCREMENT,
workspace_path TEXT NOT NULL,
alias TEXT NOT NULL,
alias_type TEXT NOT NULL, -- 'full', 'part', 'acronym'
UNIQUE(workspace_path, alias),
FOREIGN KEY (workspace_path) REFERENCES workspace_metadata(workspace_path) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_workspace ON messages(workspace_path);
CREATE INDEX IF NOT EXISTS idx_timestamp ON messages(timestamp);
CREATE INDEX IF NOT EXISTS idx_entities ON code_entities(entity_name);
CREATE INDEX IF NOT EXISTS idx_alias_lookup ON workspace_aliases(alias);
-- FTS5 Virtual Table for fast full-text search
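-- content='messages' makes this an external-content table: only the full-text
-- index is stored here; row text is read from messages via content_rowid.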
CREATE VIRTUAL TABLE IF NOT EXISTS messages_fts USING fts5(
content,
workspace_path UNINDEXED,
content='messages',
content_rowid='id'
);
-- Triggers to keep FTS5 table in sync with messages table
CREATE TRIGGER IF NOT EXISTS messages_fts_insert
AFTER INSERT ON messages BEGIN
INSERT INTO messages_fts(rowid, content, workspace_path)
VALUES (new.id, new.content, new.workspace_path);
END;
-- Direct UPDATE/DELETE on an external-content FTS5 table reads the old row
-- values from the content table; inside these AFTER triggers those values are
-- already changed or gone, so the special 'delete' command is used instead.
CREATE TRIGGER IF NOT EXISTS messages_fts_update
AFTER UPDATE ON messages BEGIN
INSERT INTO messages_fts(messages_fts, rowid, content, workspace_path)
VALUES ('delete', old.id, old.content, old.workspace_path);
INSERT INTO messages_fts(rowid, content, workspace_path)
VALUES (new.id, new.content, new.workspace_path);
END;
CREATE TRIGGER IF NOT EXISTS messages_fts_delete
AFTER DELETE ON messages BEGIN
INSERT INTO messages_fts(messages_fts, rowid, content, workspace_path)
VALUES ('delete', old.id, old.content, old.workspace_path);
END;
`);
// Workspace index cache for fast lookups
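// Populated from the workspace_aliases table at startup (buildWorkspaceIndex)
// and kept current as new messages are saved (updateWorkspaceAliases).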
const workspaceIndex = new Map<string, string>(); // alias -> full_path
// Helper: Extract workspace name and variations from path
function extractWorkspaceAliases(workspacePath: string): Array<{ alias: string; type: string }> {
const aliases: Array<{ alias: string; type: string }> = [];
const seen = new Set<string>();
// Get the workspace name from the path (last non-empty segment; handles both "/" and "\" separators)
const pathSegments = workspacePath.split(/[\\/]+/).filter(s => s.length > 0);
const workspaceName = pathSegments[pathSegments.length - 1];
if (!workspaceName) return aliases;
// 1. Full name (normalized)
const fullName = workspaceName.toLowerCase();
if (!seen.has(fullName)) {
aliases.push({ alias: fullName, type: 'full' });
seen.add(fullName);
}
// 2. Split by delimiters (-, _, space) and add parts
const parts = workspaceName.split(/[-_\s]+/).filter(p => p.length > 0);
parts.forEach(part => {
const normalized = part.toLowerCase();
if (normalized.length > 1 && !seen.has(normalized)) {
aliases.push({ alias: normalized, type: 'part' });
seen.add(normalized);
}
});
// 3. Extract acronym-style aliases (e.g., "etl-ca" -> "etlca" and "ec")
if (parts.length > 1) {
// Concatenated parts
const concatenated = parts.join('').toLowerCase();
if (!seen.has(concatenated)) {
aliases.push({ alias: concatenated, type: 'acronym' });
seen.add(concatenated);
}
// First letters (e.g., etl-ca -> ec)
const acronym = parts.map(p => p[0]).join('').toLowerCase();
if (acronym.length > 1 && !seen.has(acronym)) {
aliases.push({ alias: acronym, type: 'acronym' });
seen.add(acronym);
}
}
return aliases;
}
// Helper: Update workspace aliases in DB and cache
function updateWorkspaceAliases(workspacePath: string) {
const aliases = extractWorkspaceAliases(workspacePath);
if (aliases.length === 0) return;
// Insert aliases into DB (ignore duplicates)
const stmt = db.prepare(`
INSERT OR IGNORE INTO workspace_aliases (workspace_path, alias, alias_type)
VALUES (?, ?, ?)
`);
aliases.forEach(({ alias, type }) => {
stmt.run(workspacePath, alias, type);
// Update in-memory cache
workspaceIndex.set(alias, workspacePath);
});
}
// Helper: Build workspace index from DB on startup
function buildWorkspaceIndex() {
const aliases = db.prepare(`
SELECT alias, workspace_path FROM workspace_aliases
`).all() as Array<{ alias: string; workspace_path: string }>;
aliases.forEach(({ alias, workspace_path }) => {
workspaceIndex.set(alias, workspace_path);
});
console.error(`Loaded ${workspaceIndex.size} workspace aliases`);
}
// Helper: Smart workspace detection from query
function detectWorkspaceFromQuery(query: string): {
detectedWorkspaces: string[];
remainingQuery: string;
matchedAliases: string[];
} {
const tokens = query.toLowerCase().split(/\s+/);
const detectedWorkspaces: string[] = [];
const matchedAliases: string[] = [];
const unmatchedTokens: string[] = [];
// Try to match tokens against workspace aliases
// Priority: full name matches first, then parts
const fullMatches = new Set<string>();
const partMatches = new Set<string>();
tokens.forEach(token => {
const cleanToken = token.replace(/[^a-z0-9-]/g, '');
if (cleanToken.length === 0) {
unmatchedTokens.push(token);
return;
}
// Check exact match
const workspacePath = workspaceIndex.get(cleanToken);
if (workspacePath) {
if (!fullMatches.has(workspacePath)) {
detectedWorkspaces.push(workspacePath);
fullMatches.add(workspacePath);
matchedAliases.push(cleanToken);
}
} else {
unmatchedTokens.push(token);
}
});
// Try multi-word combinations (e.g., "etl ca" -> "etl-ca")
if (detectedWorkspaces.length === 0 && tokens.length > 1) {
for (let i = 0; i < tokens.length - 1; i++) {
const combined = tokens[i] + tokens[i + 1];
const workspacePath = workspaceIndex.get(combined);
if (workspacePath && !fullMatches.has(workspacePath)) {
detectedWorkspaces.push(workspacePath);
fullMatches.add(workspacePath);
matchedAliases.push(combined);
// Remove matched tokens from unmatched list
const idx1 = unmatchedTokens.indexOf(tokens[i]);
const idx2 = unmatchedTokens.indexOf(tokens[i + 1]);
if (idx1 > -1) unmatchedTokens.splice(idx1, 1);
if (idx2 > -1) unmatchedTokens.splice(idx2, 1);
}
}
}
return {
detectedWorkspaces: [...new Set(detectedWorkspaces)],
remainingQuery: unmatchedTokens.join(' ').trim(),
matchedAliases
};
}
// Helper: Extract code entities from text (generic pattern matching)
function extractCodeEntities(text: string): Array<{ type: string; name: string; confidence: number }> {
const entities: Array<{ type: string; name: string; confidence: number }> = [];
const seen = new Set<string>();
// Helper to add entity with deduplication
const addEntity = (type: string, name: string, confidence = 1.0) => {
const key = `${type}:${name.toLowerCase()}`;
if (!seen.has(key) && name.length > 1) {
entities.push({ type, name, confidence });
seen.add(key);
}
};
// 1. Extract PascalCase identifiers (classes, components, types) - language agnostic
const pascalCasePattern = /\b([A-Z][a-z0-9]+(?:[A-Z][a-z0-9]+)+)\b/g;
[...text.matchAll(pascalCasePattern)].forEach(match => {
addEntity('identifier', match[1], 0.9);
});
// 2. Extract camelCase identifiers (methods, variables) - language agnostic
const camelCasePattern = /\b([a-z]+[A-Z][a-zA-Z0-9]*)\b/g;
[...text.matchAll(camelCasePattern)].forEach(match => {
if (match[1].length > 3) { // Skip very short ones
addEntity('identifier', match[1], 0.7);
}
});
// 3. Extract CONSTANTS (SCREAMING_SNAKE_CASE)
const constantPattern = /\b([A-Z][A-Z0-9_]{2,})\b/g;
[...text.matchAll(constantPattern)].forEach(match => {
// Exclude common acronyms that aren't constants
if (!['HTTP', 'HTTPS', 'API', 'URL', 'URI', 'SQL', 'XML', 'JSON', 'HTML', 'CSS'].includes(match[1])) {
addEntity('constant', match[1], 0.8);
}
});
// 4. Extract kebab-case identifiers (modern frameworks, components)
const kebabPattern = /\b([a-z][a-z0-9]*(?:-[a-z0-9]+)+)\b/g;
[...text.matchAll(kebabPattern)].forEach(match => {
if (match[1].length > 4) { // Skip very short ones like "e-x"
addEntity('identifier', match[1], 0.8);
}
});
// 5. Extract snake_case identifiers (Python, Ruby, databases)
const snakeCasePattern = /\b([a-z][a-z0-9]*(?:_[a-z0-9]+)+)\b/g;
[...text.matchAll(snakeCasePattern)].forEach(match => {
if (match[1].length > 4) {
addEntity('identifier', match[1], 0.8);
}
});
// 6. Extract file references (any extension, 2-10 chars)
const filePattern = /\b([a-zA-Z0-9_-]+\.[a-z0-9]{2,10})\b/gi;
[...text.matchAll(filePattern)].forEach(match => {
addEntity('file', match[1].toLowerCase(), 1.0);
});
// 7. Extract inline code (marked as important by user with backticks)
const inlineCodePattern = /`([^`]{2,50})`/g;
[...text.matchAll(inlineCodePattern)].forEach(match => {
const code = match[1].trim();
if (code.length > 2 && !code.includes(' ')) {
addEntity('code', code, 0.95);
}
});
// 8. Extract quoted technical terms
const quotedPattern = /"([^"]{3,40})"/g;
[...text.matchAll(quotedPattern)].forEach(match => {
const term = match[1].trim();
// Only add if looks technical (has special chars, camelCase, PascalCase, etc.)
if (/[A-Z_-]/.test(term) || /[a-z][A-Z]/.test(term)) {
addEntity('term', term, 0.85);
}
});
// 9. Extract important technical words (frequency-based)
const words = text.toLowerCase().match(/\b[a-z]{4,20}\b/g) || [];
const wordFreq = new Map<string, number>();
words.forEach(word => {
wordFreq.set(word, (wordFreq.get(word) || 0) + 1);
});
// Common stop words to exclude
const stopWords = new Set([
'that', 'this', 'with', 'from', 'have', 'been', 'were', 'said',
'would', 'could', 'should', 'there', 'their', 'about', 'which',
'when', 'where', 'what', 'they', 'them', 'these', 'those',
'will', 'your', 'more', 'some', 'other', 'into', 'than',
'then', 'also', 'only', 'over', 'after', 'most', 'such',
'make', 'like', 'time', 'just', 'know', 'take', 'people',
'year', 'good', 'work', 'well', 'back', 'call', 'want',
'need', 'very', 'even', 'much', 'here', 'both', 'each',
'find', 'still', 'long', 'great', 'come', 'give', 'through'
]);
// Add words (longer than 4 characters) that appear 2+ times and aren't stop words
wordFreq.forEach((count, word) => {
if (count >= 2 && !stopWords.has(word) && word.length > 4) {
// Higher frequency = higher confidence, but cap at 0.6
const confidence = Math.min(count / 10, 0.6);
addEntity('keyword', word, confidence);
}
});
// Sort by confidence (highest first) and limit to top 50 most relevant
return entities
.sort((a, b) => b.confidence - a.confidence)
.slice(0, 50);
}
// Helper: Save message to database
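// The message insert, entity inserts, and metadata upsert run as separate
// autocommit statements; wrapping them in better-sqlite3's db.transaction()
// would make the whole write atomic if that is ever needed.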
function saveMessage(workspacePath: string, role: string, content: string): number {
const stmt = db.prepare("INSERT INTO messages (workspace_path, role, content) VALUES (?, ?, ?)");
const result = stmt.run(workspacePath, role, content);
const messageId = result.lastInsertRowid as number;
// Extract and save code entities
const entities = extractCodeEntities(content);
if (entities.length > 0) {
const entityStmt = db.prepare("INSERT INTO code_entities (message_id, entity_type, entity_name, confidence) VALUES (?, ?, ?, ?)");
entities.forEach(entity => {
entityStmt.run(messageId, entity.type, entity.name, entity.confidence);
});
}
// Update workspace metadata
db.prepare(`
INSERT INTO workspace_metadata (workspace_path, last_active, message_count)
VALUES (?, CURRENT_TIMESTAMP, 1)
ON CONFLICT(workspace_path)
DO UPDATE SET last_active = CURRENT_TIMESTAMP, message_count = message_count + 1
`).run(workspacePath);
// Update workspace aliases for smart search
updateWorkspaceAliases(workspacePath);
return messageId;
}
// Helper: Get related context based on code entities
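// Entity names are matched exactly (SQLite text comparison is case-sensitive
// by default), so callers should pass identifiers as they were extracted.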
function getRelatedContext(currentWorkspace: string, entities: string[], limit: number = 10) {
if (entities.length === 0) {
return [];
}
const placeholders = entities.map(() => "?").join(",");
const query = `
SELECT DISTINCT
m.id,
m.workspace_path,
m.role,
m.content,
m.timestamp,
GROUP_CONCAT(DISTINCT ce.entity_name) as matched_entities
FROM messages m
JOIN code_entities ce ON m.id = ce.message_id
WHERE ce.entity_name IN (${placeholders})
AND m.workspace_path != ?
GROUP BY m.id
ORDER BY m.timestamp DESC
LIMIT ?
`;
return db.prepare(query).all(...entities, currentWorkspace, limit);
}
// Create MCP server
const server = new Server(
{
name: "context-persistence-mcp-server",
version: "1.0.0",
},
{
capabilities: {
tools: {},
resources: {},
},
}
);
// List available tools
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "save_conversation",
description: "Save the current conversation message to persistent storage. This allows context to be shared across VS Code windows.",
inputSchema: {
type: "object",
properties: {
workspace_path: {
type: "string",
description: "Absolute path to the current workspace/repository",
},
role: {
type: "string",
enum: ["user", "assistant"],
description: "Who sent the message (user or assistant)",
},
content: {
type: "string",
description: "The message content to save",
},
},
required: ["workspace_path", "role", "content"],
},
},
{
name: "get_related_context",
description: "Search for related conversations from other workspaces based on code entities (classes, files, methods). Use this to find relevant context when analyzing code.",
inputSchema: {
type: "object",
properties: {
current_workspace: {
type: "string",
description: "Current workspace path (to exclude from results)",
},
entities: {
type: "array",
items: { type: "string" },
description: "List of code entities to search for (class names, file names, method names)",
},
limit: {
type: "number",
description: "Maximum number of results (default: 10)",
default: 10,
},
},
required: ["current_workspace", "entities"],
},
},
{
name: "search_conversations",
description: "Search through all saved conversations using full-text search. Supports boolean operators (AND, OR, NOT), phrases in quotes, and prefix matching with *. Useful for finding discussions across all workspaces.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query using FTS5 syntax. Examples: 'AuthService', 'authentication AND security', '\"user login\"', 'Auth*'",
},
workspace_path: {
type: "string",
description: "Optional: limit search to specific workspace",
},
limit: {
type: "number",
description: "Maximum results (default: 20)",
default: 20,
},
},
required: ["query"],
},
},
{
name: "get_workspace_summary",
description: "Get summary of all workspaces with saved context, including message counts and last activity.",
inputSchema: {
type: "object",
properties: {},
},
},
{
name: "smart_search",
description: "Intelligent search that automatically detects workspace names in your query and searches accordingly. Just ask naturally like 'show me etl-ca pipeline' or 'what's in k2'. Supports workspace name detection, keyword search, and FTS5 full-text search.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Natural language query. Can include workspace names (etl-ca, k2, etc.) and keywords. Examples: 'etl-ca DAG', 'k2 authentication', 'Spring Security'",
},
limit: {
type: "number",
description: "Maximum results (default: 20)",
default: 20,
},
},
required: ["query"],
},
},
],
};
});
// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
try {
switch (name) {
case "save_conversation": {
const { workspace_path, role, content } = args as {
workspace_path: string;
role: string;
content: string;
};
const messageId = saveMessage(workspace_path, role, content);
const entities = extractCodeEntities(content);
return {
content: [
{
type: "text",
text: JSON.stringify({
success: true,
message_id: messageId,
entities_extracted: entities.length,
entities: entities,
}, null, 2),
},
],
};
}
case "get_related_context": {
const { current_workspace, entities, limit = 10 } = args as {
current_workspace: string;
entities: string[];
limit?: number;
};
const results = getRelatedContext(current_workspace, entities, limit);
return {
content: [
{
type: "text",
text: JSON.stringify({
success: true,
current_workspace,
searched_entities: entities,
found_conversations: results.length,
conversations: results,
}, null, 2),
},
],
};
}
case "search_conversations": {
const { query, workspace_path, limit = 20 } = args as {
query: string;
workspace_path?: string;
limit?: number;
};
// Use FTS5 for fast full-text search with relevance ranking
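// bm25() returns more-negative scores for better matches, so ordering by
// relevance_score ascending puts the best hits first.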
let sql = `
SELECT
m.id,
m.workspace_path,
m.role,
m.content,
m.timestamp,
bm25(messages_fts) as relevance_score,
snippet(messages_fts, 0, '**', '**', '...', 64) as snippet
FROM messages_fts
JOIN messages m ON messages_fts.rowid = m.id
WHERE messages_fts MATCH ?
`;
const params: any[] = [query];
if (workspace_path) {
sql += " AND m.workspace_path = ?";
params.push(workspace_path);
}
sql += " ORDER BY relevance_score LIMIT ?";
params.push(limit);
const results = db.prepare(sql).all(...params);
return {
content: [
{
type: "text",
text: JSON.stringify({
success: true,
query,
found: results.length,
search_type: "FTS5 (full-text)",
conversations: results,
}, null, 2),
},
],
};
}
case "get_workspace_summary": {
const results = db.prepare(`
SELECT
workspace_path,
message_count,
last_active
FROM workspace_metadata
ORDER BY last_active DESC
`).all();
return {
content: [
{
type: "text",
text: JSON.stringify({
success: true,
total_workspaces: results.length,
workspaces: results,
}, null, 2),
},
],
};
}
case "smart_search": {
const { query, limit = 20 } = args as {
query: string;
limit?: number;
};
// Detect workspace mentions in query
const detection = detectWorkspaceFromQuery(query);
let results;
let searchStrategy: string;
if (detection.detectedWorkspaces.length > 0) {
// Workspace-targeted search
searchStrategy = `workspace-targeted (${detection.matchedAliases.join(', ')})`;
const workspacePlaceholders = detection.detectedWorkspaces.map(() => '?').join(',');
if (detection.remainingQuery.length > 0) {
// Search with keywords within detected workspaces
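// The remaining tokens are passed to MATCH verbatim and interpreted as FTS5
// query syntax; stray punctuation (e.g. an apostrophe) can raise a syntax
// error, which the outer try/catch reports back to the caller.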
const sql = `
SELECT
m.id,
m.workspace_path,
m.role,
m.content,
m.timestamp,
bm25(messages_fts) as relevance_score,
snippet(messages_fts, 0, '**', '**', '...', 64) as snippet
FROM messages_fts
JOIN messages m ON messages_fts.rowid = m.id
WHERE m.workspace_path IN (${workspacePlaceholders})
AND messages_fts MATCH ?
ORDER BY relevance_score
LIMIT ?
`;
results = db.prepare(sql).all(...detection.detectedWorkspaces, detection.remainingQuery, limit);
} else {
// Return all messages from detected workspaces
const sql = `
SELECT
m.id,
m.workspace_path,
m.role,
m.content,
m.timestamp
FROM messages m
WHERE m.workspace_path IN (${workspacePlaceholders})
ORDER BY m.timestamp DESC
LIMIT ?
`;
results = db.prepare(sql).all(...detection.detectedWorkspaces, limit);
}
} else {
// Global FTS5 search across all workspaces
searchStrategy = 'global-search';
const sql = `
SELECT
m.id,
m.workspace_path,
m.role,
m.content,
m.timestamp,
bm25(messages_fts) as relevance_score,
snippet(messages_fts, 0, '**', '**', '...', 64) as snippet
FROM messages_fts
JOIN messages m ON messages_fts.rowid = m.id
WHERE messages_fts MATCH ?
ORDER BY relevance_score
LIMIT ?
`;
results = db.prepare(sql).all(query, limit);
}
return {
content: [
{
type: "text",
text: JSON.stringify({
success: true,
query,
search_strategy: searchStrategy,
detected_workspaces: detection.detectedWorkspaces,
matched_aliases: detection.matchedAliases,
remaining_query: detection.remainingQuery,
found: results.length,
conversations: results,
}, null, 2),
},
],
};
}
default:
return {
content: [{ type: "text", text: JSON.stringify({ error: `Unknown tool: ${name}` }) }],
isError: true,
};
}
} catch (error) {
return {
content: [{
type: "text",
text: JSON.stringify({
error: error instanceof Error ? error.message : String(error),
}),
}],
isError: true,
};
}
});
// List available resources
server.setRequestHandler(ListResourcesRequestSchema, async () => {
const workspaces = db.prepare("SELECT workspace_path FROM workspace_metadata ORDER BY last_active DESC").all();
return {
resources: [
{
uri: "context://recent",
name: "Recent conversations across all workspaces",
description: "Last 50 messages from all workspaces to provide cross-window context",
mimeType: "application/json",
},
...workspaces.map((ws: any) => ({
uri: `context://workspace/${encodeURIComponent(ws.workspace_path)}`,
name: `Context for ${ws.workspace_path.split("/").pop()}`,
description: `Conversation history for workspace: ${ws.workspace_path}`,
mimeType: "application/json",
})),
],
};
});
// Read resource content
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const uri = request.params.uri;
try {
if (uri === "context://recent") {
const messages = db.prepare(`
SELECT m.*, GROUP_CONCAT(DISTINCT ce.entity_name) as entities
FROM messages m
LEFT JOIN code_entities ce ON m.id = ce.message_id
GROUP BY m.id
ORDER BY m.timestamp DESC
LIMIT 50
`).all();
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify({
description: "Recent conversations across all workspaces",
message_count: messages.length,
messages,
}, null, 2),
},
],
};
}
if (uri.startsWith("context://workspace/")) {
const workspacePath = decodeURIComponent(uri.replace("context://workspace/", ""));
const messages = db.prepare(`
SELECT m.*, GROUP_CONCAT(DISTINCT ce.entity_name) as entities
FROM messages m
LEFT JOIN code_entities ce ON m.id = ce.message_id
WHERE m.workspace_path = ?
GROUP BY m.id
ORDER BY m.timestamp DESC
LIMIT 100
`).all(workspacePath);
return {
contents: [
{
uri,
mimeType: "application/json",
text: JSON.stringify({
workspace_path: workspacePath,
message_count: messages.length,
messages,
}, null, 2),
},
],
};
}
return {
contents: [
{
uri,
mimeType: "text/plain",
text: "Resource not found",
},
],
};
} catch (error) {
return {
contents: [
{
uri,
mimeType: "text/plain",
text: `Error: ${error instanceof Error ? error.message : String(error)}`,
},
],
};
}
});
// Start server
async function main() {
// Build workspace index on startup
buildWorkspaceIndex();
const transport = new StdioServerTransport();
await server.connect(transport);
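// Log to stderr: stdout is reserved for the MCP stdio protocol stream.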
console.error("Context Persistence MCP Server running on stdio");
console.error(`Database location: ${dbPath}`);
}
main().catch((error) => {
console.error("Fatal error:", error);
process.exit(1);
});