Skip to main content
Glama
index.ts (21.3 kB)
import { Server } from "@modelcontextprotocol/sdk/server/index.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { ListToolsRequestSchema, CallToolRequestSchema } from "@modelcontextprotocol/sdk/types.js"; import { execFile } from "node:child_process"; import { promisify } from "node:util"; import { readFile, writeFile } from "node:fs/promises"; import * as fs from "node:fs/promises"; import * as path from "node:path"; import YAML from "yaml"; import { hybridCommands } from "./commands/hybrid-pipeline.js"; import { loadSecurityConfig, validateMapfilePath, validateMapfileSize, sanitizeMapfile, compareWithReviewExtention } from "./config/security.js"; const execp = promisify(execFile); type Allow = { blocks: string[], inline: string[] }; // --- Minimal conservative allowlist (works as a starting point) --- const BUILTIN_ALLOW: Allow = { blocks: [ "list","emlist","source","cmd","quote","image","figure","table", "note","memo","column","dialog","footnote","reviewlistblock" ], inline: [ "href","code","tt","b","strong","em","i","u","m","rb","kw","key","sup","sub" ] }; type Config = { profile?: "review-5.8"|"review-2.5"|"dual", target?: "latex"|"html"|"idgxml", blockOnUnknownTags?: boolean, autoFixIdsOnSave?: boolean }; async function loadConfig(cwd: string): Promise<Config> { const p = path.join(cwd, "review-mcp.json"); try { const raw = await readFile(p, "utf-8"); return JSON.parse(raw); } catch { return { profile: "dual", target: "latex", blockOnUnknownTags: true, autoFixIdsOnSave: true }; } } async function withBundle(cwd: string, argv: string[]) { try { // If Bundler works, prefer bundle exec to respect project Gemfile await execp("bundle", ["exec", "ruby", "-v"], { cwd, timeout: 8000 }); return await execp("bundle", ["exec", ...argv], { cwd, timeout: 60000, maxBuffer: 10*1024*1024 }); } catch { // Fallback to direct command const [cmd, ...rest] = argv; return await execp(cmd, rest, { cwd, timeout: 60000, maxBuffer: 
10*1024*1024 }); } } function slugifyBase(filename: string): string { const base = path.basename(filename, path.extname(filename)); return base.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-+|-+$/g, ""); } // Regex helpers const RE_BLOCK_OPEN = /^\/\/([A-Za-z0-9_]+)(\[[^\]]*\])?\s*\{\s*$/; const RE_BLOCK_OPEN_G = /^\/\/([A-Za-z0-9_]+)(\[[^\]]*\])?\s*\{\s*$/m; const RE_BLOCK_CLOSE = /^\/\/\}\s*$/; const RE_INLINE_G = /@<([A-Za-z0-9_]+)>\{[^}]*\}/g; const RE_BRACKET = /\[([^\]]*)\]/; const RE_ID_KV = /(?:^|,\s*)id\s*=\s*("?)([^",\]]+)\1/; const RE_CAPTION_ID = /\\review\w*caption\[(.*?)\]\{/; function isIdTargetBlock(name: string) { return new Set(["list","emlist","image","figure","table","source","cmd","quote"]).has(name); } function pickFilesFromCatalog(cwd: string, catalogPath="catalog.yml"): Promise<string[]> { return fs.readFile(path.join(cwd, catalogPath), "utf-8") .then(txt => { const y = YAML.parse(txt); const sections = ["PREDEF","CHAPS","APPENDIX"]; const files: string[] = []; for (const sec of sections) { if (Array.isArray(y?.[sec])) files.push(...y[sec]); } return files; }); } function* scanTags(file: string, text: string, allow: Allow) { const allowB = new Set(allow.blocks); const allowI = new Set(allow.inline); const lines = text.split(/\r?\n/); for (let i=0;i<lines.length;i++) { const m = lines[i].match(RE_BLOCK_OPEN); if (m) { const name = m[1]; if (!allowB.has(name)) { yield { file, line: i+1, kind: "block", name, snippet: lines[i].trim() }; } } } for (const m of text.matchAll(RE_INLINE_G)) { const name = m[1]; if (!allowI.has(name)) { const idx = text.slice(0, m.index!).split(/\r?\n/).length; yield { file, line: idx, kind: "inline", name, snippet: m[0] }; } } } async function gatherUsedIds(cwd: string, files: string[]) { const used = new Set<string>(); for (const f of files) { const txt = await fs.readFile(path.join(cwd, f), "utf-8"); // blocks for (const line of txt.split(/\r?\n/)) { const m = line.match(RE_BLOCK_OPEN); if (!m) continue; 
const bracket = m[2] ?? ""; if (bracket) { const b = bracket.match(RE_BRACKET); if (b) { const attrs = b[1]; const kv = attrs.match(RE_ID_KV); if (kv) used.add(kv[2].trim()); } } } // captions for (const m of txt.matchAll(RE_CAPTION_ID)) { const id = (m[1] || "").trim(); if (id) used.add(id); } } return used; } function planFixIdsForFile(file: string, text: string, usedIds: Set<string>, prefixBase: string) { const fixes: any[] = []; const lines = text.split(/\r?\n/); for (let i=0;i<lines.length;i++) { const m = lines[i].match(RE_BLOCK_OPEN); if (!m) continue; const name = m[1]; const bracket = m[2] ?? ""; if (!isIdTargetBlock(name)) continue; let idVal: string | null = null; if (bracket) { const b = bracket.match(RE_BRACKET); if (b) { const attrs = b[1]; const kv = attrs.match(RE_ID_KV); idVal = kv ? kv[2].trim() : null; } } const mkId = (base: string) => { let n = 1, cand = `${base}-${String(n).padStart(3,"0")}`; while (usedIds.has(cand)) { n++; cand = `${base}-${String(n).padStart(3,"0")}`; } usedIds.add(cand); return cand; }; if (!idVal || idVal === "") { const cand = mkId(`${prefixBase}-${name}`); const before = lines[i]; let after: string; if (bracket) { after = before.replace(RE_BRACKET, (_all, inner) => { const sep = String(inner).trim().length ? `${inner}, id=${cand}` : `id=${cand}`; return `[${sep}]`; }); } else { after = before.replace(/^\/\/([A-Za-z0-9_]+)/, `//$1[id=${cand}]`); } fixes.push({ file, lineStart: i+1, lineEnd: i+1, before, after, reason: "empty" }); } else if (usedIds.has(idVal)) { const cand = mkId(`${prefixBase}-${name}`); const before = lines[i]; const after = before.replace(RE_ID_KV, (_a, q) => `${q ? 
`id=${q}${cand}${q}` : `id=${cand}`}`); fixes.push({ file, lineStart: i+1, lineEnd: i+1, before, after, reason: "duplicate" }); } else { usedIds.add(idVal); } } // captions for (let i=0;i<lines.length;i++) { const m = lines[i].match(RE_CAPTION_ID); if (!m) continue; const id = (m[1] || "").trim(); const mkId = (base: string) => { let n = 1, cand = `${base}-${String(n).padStart(3,"0")}`; while (usedIds.has(cand)) { n++; cand = `${base}-${String(n).padStart(3,"0")}`; } usedIds.add(cand); return cand; }; if (!id || usedIds.has(id)) { const cand = mkId(`${prefixBase}-cap`); const before = lines[i]; const after = before.replace(RE_CAPTION_ID, (_all) => `\\reviewlistcaption[${cand}]{`); fixes.push({ file, lineStart: i+1, lineEnd: i+1, before, after, reason: id ? "duplicate" : "empty" }); } else { usedIds.add(id); } } return fixes; } async function applyFixes(cwd: string, fixes: any[]) { const byFile = new Map<string, any[]>(); for (const f of fixes) { if (!byFile.has(f.file)) byFile.set(f.file, []); byFile.get(f.file)!.push(f); } let applied = 0; for (const [file, list] of byFile) { const full = path.join(cwd, file); const txt = await fs.readFile(full, "utf-8"); const lines = txt.split(/\r?\n/); for (const f of list.sort((a,b)=>b.lineStart-a.lineStart)) { lines[f.lineStart-1] = f.after; applied++; } await fs.copyFile(full, full + ".bak"); await fs.writeFile(full, lines.join("\n"), "utf-8"); } return applied; } const server = new Server( { name: "review-mcp", version: "0.1.0" }, { capabilities: { tools: {} } } ); // Tool definitions const tools = [ { name: "review.version", description: "Return Re:VIEW CLI version (prefers bundle exec).", inputSchema: { type: "object", properties: { cwd: { type: "string" } }, required: ["cwd"] } }, { name: "review.tags.list", description: "Return allowed tags (built-in conservative list; replace with dynamic probe later).", inputSchema: { type: "object", properties: { cwd: { type: "string" }, profile: { type: "string" } }, required: 
["cwd"] } }, { name: "review.enforceTags.check", description: "Scan .re files for unknown tags using allowlist; returns violations.", inputSchema: { type: "object", properties: { cwd: { type: "string" }, allow: { type: "object" } }, required: ["cwd"] } }, { name: "review.fixIds.plan", description: "Plan auto-fixes for empty/duplicate IDs across all .re files.", inputSchema: { type: "object", properties: { cwd: { type: "string" } }, required: ["cwd"] } }, { name: "review.fixIds.apply", description: "Apply a previously calculated ID-fix plan; creates .bak backups.", inputSchema: { type: "object", properties: { cwd: { type: "string" }, fixes: { type: "array", items: { type: "object" } } }, required: ["cwd","fixes"] } }, { name: "review.lint", description: "Run a fast sanity check by compiling each .re to latex and parsing stderr warnings.", inputSchema: { type: "object", properties: { cwd: { type: "string" } }, required: ["cwd"] } }, // Hybrid pipeline commands { name: "review.preprocess", description: "JS preprocessor only - normalizes input and adds metadata", inputSchema: { type: "object", properties: { cwd: { type: "string" }, pattern: { type: "string" }, output: { type: "string" }, stats: { type: "boolean" } }, required: ["cwd"] } }, { name: "review.build-pdf-hybrid", description: "JS→Ruby hybrid pipeline for PDF generation (PDF first priority)", inputSchema: { type: "object", properties: { cwd: { type: "string" }, config: { type: "string" }, skipPreprocess: { type: "boolean" } }, required: ["cwd"] } }, { name: "review.check-ruby-extensions", description: "Verify Ruby extensions (ReviewExtention) are loaded correctly", inputSchema: { type: "object", properties: { cwd: { type: "string" } }, required: ["cwd"] } }, { name: "review.test-mapfile", description: "Test #@mapfile macro with security validation (developer tool)", inputSchema: { type: "object", properties: { cwd: { type: "string" }, file: { type: "string" } }, required: ["cwd", "file"] } }, // Security SSOT 
commands { name: "review.security.config", description: "Get current security configuration (SSOT from ReviewExtention)", inputSchema: { type: "object", properties: { cwd: { type: "string" }, forceReload: { type: "boolean" } }, required: ["cwd"] } }, { name: "review.security.validate-mapfile", description: "Validate mapfile path and content against security policy", inputSchema: { type: "object", properties: { cwd: { type: "string" }, filepath: { type: "string" } }, required: ["cwd", "filepath"] } }, { name: "review.security.compare", description: "Compare MCP config with ReviewExtention config to ensure SSOT", inputSchema: { type: "object", properties: { cwd: { type: "string" } }, required: ["cwd"] } } ]; function parseStderr(stderr: string, fallbackFile?: string) { const diags: any[] = []; const lines = stderr.split(/\r?\n/); const reInvalid = /^([^\s:]+):(\d+):\s+`\/\/'\s+seen.*?:\s+"(.+)"$/; // 09_xx.re:42: `//' seen ... const reDupId = /warning:\s+duplicate ID:/i; for (let raw of lines) { const line = raw.replace(/^\p{So}|^\s*⚠\s*WARN\s*/u, "").trim(); const m = line.match(reInvalid); if (m) { diags.push({ file: m[1], line: Number(m[2]), severity: "warning", message: `Invalid block start '//': ${m[3]}` }); continue; } if (reDupId.test(line)) { diags.push({ file: fallbackFile ?? 
null, line: null, severity: "warning", message: "Duplicate/empty ID detected" }); } } return diags; } // Register handlers server.setRequestHandler(ListToolsRequestSchema, async () => { return { tools: tools }; }); server.setRequestHandler(CallToolRequestSchema, async (request) => { const { name, arguments: args } = request.params; // Type guard for arguments if (!args || typeof args !== 'object') { throw new Error('Invalid arguments'); } switch (name) { case "review.version": { const { stdout } = await withBundle(args.cwd as string, ["review", "--version"]); return { content: [ { type: "text", text: JSON.stringify({ version: stdout.trim() }) } ] }; } case "review.tags.list": { return { content: [ { type: "text", text: JSON.stringify({ blocks: BUILTIN_ALLOW.blocks, inline: BUILTIN_ALLOW.inline, meta: { source: "builtin" } }) } ] }; } case "review.enforceTags.check": { const cfg = await loadConfig(args.cwd as string); const files = await pickFilesFromCatalog(args.cwd as string); const a: Allow = (args.allow as Allow) ?? 
BUILTIN_ALLOW; const violations: any[] = []; for (const f of files) { const p = path.join(args.cwd as string, f); try { const txt = await fs.readFile(p, "utf-8"); for (const v of scanTags(f, txt, a)) violations.push(v); } catch (e) { violations.push({ file: f, error: String(e) }); } } return { content: [ { type: "text", text: JSON.stringify({ profile: cfg.profile, violations }) } ] }; } case "review.fixIds.plan": { const files = await pickFilesFromCatalog(args.cwd as string); const used = await gatherUsedIds(args.cwd as string, files); const fixes: any[] = []; for (const f of files) { const p = path.join(args.cwd as string, f); const txt = await fs.readFile(p, "utf-8"); const prefix = slugifyBase(f); const plan = planFixIdsForFile(f, txt, used, prefix); fixes.push(...plan); } return { content: [ { type: "text", text: JSON.stringify({ count: fixes.length, fixes }) } ] }; } case "review.fixIds.apply": { const applied = await applyFixes(args.cwd as string, args.fixes as any[]); return { content: [ { type: "text", text: JSON.stringify({ applied }) } ] }; } case "review.lint": { const files = await pickFilesFromCatalog(args.cwd as string); const diagnostics: any[] = []; for (const f of files) { try { await withBundle(args.cwd as string, ["review-compile", "--target=latex", "--footnotetext", f]); } catch (e: any) { const stderr = e?.stderr || e?.message || ""; diagnostics.push(...parseStderr(stderr, f)); continue; } } return { content: [ { type: "text", text: JSON.stringify({ diagnostics }) } ] }; } case "review.preprocess": { const result = await hybridCommands.preprocess({ cwd: args.cwd as string, pattern: args.pattern as string | undefined, output: args.output as string | undefined, stats: args.stats as boolean | undefined }); return { content: [ { type: "text", text: JSON.stringify(result) } ] }; } case "review.build-pdf-hybrid": { const result = await hybridCommands.buildPdfHybrid({ cwd: args.cwd as string, config: args.config as string | undefined, skipPreprocess: 
args.skipPreprocess as boolean | undefined }); return { content: [ { type: "text", text: JSON.stringify(result) } ] }; } case "review.check-ruby-extensions": { const result = await hybridCommands.checkRubyExtensions({ cwd: args.cwd as string }); return { content: [ { type: "text", text: JSON.stringify(result) } ] }; } case "review.test-mapfile": { const securityConfig = await loadSecurityConfig(args.cwd as string); const pathValidation = validateMapfilePath(args.file as string, securityConfig); if (!pathValidation.valid) { return { content: [ { type: "text", text: JSON.stringify({ success: false, error: `Security validation failed: ${pathValidation.reason}` }) } ] }; } const sizeValidation = await validateMapfileSize( args.file as string, args.cwd as string, securityConfig ); if (!sizeValidation.valid) { return { content: [ { type: "text", text: JSON.stringify({ success: false, error: `File size validation failed: ${sizeValidation.reason}` }) } ] }; } const result = await hybridCommands.testMapfile({ file: args.file as string, cwd: args.cwd as string }); return { content: [ { type: "text", text: JSON.stringify(result) } ] }; } case "review.security.config": { const config = await loadSecurityConfig( args.cwd as string, args.forceReload as boolean | undefined ); return { content: [ { type: "text", text: JSON.stringify(config) } ] }; } case "review.security.validate-mapfile": { const config = await loadSecurityConfig(args.cwd as string); const pathValidation = validateMapfilePath(args.filepath as string, config); if (!pathValidation.valid) { return { content: [ { type: "text", text: JSON.stringify({ valid: false, reason: pathValidation.reason, config: { source: config.source } }) } ] }; } const sizeValidation = await validateMapfileSize( args.filepath as string, args.cwd as string, config ); if (!sizeValidation.valid) { return { content: [ { type: "text", text: JSON.stringify({ valid: false, reason: sizeValidation.reason, size: sizeValidation.size, config: { source: 
config.source, maxFileSize: config.maxFileSize } }) } ] }; } const fullPath = path.join(args.cwd as string, args.filepath as string); try { const content = await fs.readFile(fullPath, "utf-8"); const sanitization = await sanitizeMapfile(content, args.filepath as string, config); return { content: [ { type: "text", text: JSON.stringify({ valid: sanitization.safe, size: sizeValidation.size, issues: sanitization.issues, config: { source: config.source } }) } ] }; } catch (error: any) { return { content: [ { type: "text", text: JSON.stringify({ valid: false, error: error.message }) } ] }; } } case "review.security.compare": { const currentConfig = await loadSecurityConfig(args.cwd as string); const comparison = await compareWithReviewExtention(args.cwd as string, currentConfig); return { content: [ { type: "text", text: JSON.stringify({ currentSource: currentConfig.source, matching: comparison.matching, differences: comparison.differences }) } ] }; } default: throw new Error(`Unknown tool: ${name}`); } }); // Start server const transport = new StdioServerTransport(); await server.connect(transport); console.log("[review-mcp] Minimal MCP server started.");

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/dsgarage/ReviewMCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.