#!/usr/bin/env node
/**
* Lotus MCP Server - Multi-Platform Support
*
* This MCP server works with both Cursor and Claude Code:
* - Cursor: Writes URL to temp file, extension opens Simple Browser
* - Claude Code: Opens URL in system browser
*
* Set LOTUS_PLATFORM environment variable to "cursor" or "cli" (default: "cli")
*/
import mic from "node-mic";
import { writeFileSync } from "fs";
import { tmpdir } from "os";
import { join } from "path";
import { exec } from "child_process";
// Platform detection
// LOTUS_PLATFORM selects the browser integration: "cursor" (live-view URL is
// handed to the VS Code extension via temp files) or "cli" (URL is opened in
// the system browser). Anything unset/unknown falls back to "cli".
const PLATFORM = process.env.LOTUS_PLATFORM || "cli"; // "cursor" or "cli"
// Temp files watched by the Cursor extension: the live-view URL to open, and
// a JSON blob describing recording state for the status bar.
const BROWSER_URL_FILE = join(tmpdir(), "lotus-browser-url.txt");
const RECORDING_STATE_FILE = join(tmpdir(), "lotus-recording-state.json");
// A skill definition as returned by the Lotus /api/v1/skills endpoint.
// Each skill is exposed to MCP clients as one tool (see toToolName).
interface Skill {
id: string
name: string
description: string
// Declared inputs; each is mapped onto a JSON Schema property (toJsonSchema).
inputs: { name: string; type: string; description: string }[]
}
// Shape of an incoming JSON-RPC 2.0 request line on stdin (MCP framing:
// one JSON object per newline-terminated line).
interface MCPRequest {
jsonrpc: "2.0"
id: number | string
method: string
params?: Record<string, unknown>
}
// Required connection settings; their presence is validated in "initialize".
const LOTUS_API_KEY = process.env.LOTUS_API_KEY
const LOTUS_URL = process.env.LOTUS_URL
// Cached skill list, refreshed on "initialize" and every "tools/list".
let skills: Skill[] = []
// Active recording session state
// At most one recording session at a time. micInstance holds the node-mic
// recorder (untyped library, hence `any`); audioChunks accumulates raw WAV
// data until lotus_stop_recording concatenates it.
let activeRecordingSession: {
sessionId: string
liveViewUrl: string
micInstance: any
audioChunks: Buffer[]
} | null = null
// Active refinement session state
// Set when recording stops, carrying the same session id into refinement;
// liveViewUrl is filled in once /refine/start returns.
let activeRefineSession: {
sessionId: string
liveViewUrl: string
} | null = null
/** Write one MCP message to stdout as a single newline-terminated JSON line. */
function send(message: object): void {
  const line = JSON.stringify(message)
  process.stdout.write(`${line}\n`)
}
/**
 * Emit a JSON-RPC error response. `id` may be null when the request id is
 * unknown (e.g. the incoming line failed to parse).
 */
function sendError(id: number | string | null, code: number, message: string): void {
  send({
    jsonrpc: "2.0",
    id,
    error: { code, message }
  })
}
/**
 * Diagnostic logging. Goes to stderr because stdout is reserved for the MCP
 * protocol stream.
 */
function log(message: string): void {
  // console.error writes to stderr with a trailing newline
  console.error(`[lotus-mcp:${PLATFORM}] ${message}`)
}
// Log config at module load (helps debug env var issues)
// Only the first 4 characters of the API key are logged to avoid leaking the
// secret into logs.
log(`Config: LOTUS_URL=${LOTUS_URL}, API_KEY=${LOTUS_API_KEY ? `${LOTUS_API_KEY.slice(0, 4)}...` : 'NOT SET'}`)
/**
 * Fetch `url` and parse the body as JSON.
 *
 * Raises a descriptive Error when the server responds with a non-JSON payload
 * (e.g. an HTML error page from a wrong LOTUS_URL) or a non-2xx status whose
 * JSON body carries a `message` field.
 */
async function fetchJson<T>(url: string, options?: RequestInit): Promise<T> {
  const response = await fetch(url, options)
  const contentType = response.headers.get('content-type') ?? ''
  if (!contentType.includes('application/json')) {
    // Not JSON at all — log a snippet of the body to aid debugging, then fail.
    const body = await response.text()
    log(`API error: Expected JSON, got ${contentType}. URL: ${url}`)
    log(`Response body (first 500 chars): ${body.slice(0, 500)}`)
    throw new Error(`API returned ${response.status} with ${contentType || 'no content-type'}. Check LOTUS_URL and LOTUS_API_KEY.`)
  }
  if (!response.ok) {
    // JSON error payload — surface its message if present.
    const payload = await response.json() as { message?: string }
    throw new Error(`API error ${response.status}: ${payload.message || JSON.stringify(payload)}`)
  }
  return response.json() as T
}
// ============================================================================
// PLATFORM-SPECIFIC BROWSER HANDLING
// ============================================================================
/**
 * Surface the live-view browser to the user once a session is ready.
 *
 * Cursor: drop the URL plus a "recording" state blob into temp files that the
 * VS Code extension watches, which opens the Simple Browser panel.
 * CLI: open the URL directly in the default system browser.
 */
function onBrowserReady(url: string, sessionId: string): void {
  if (PLATFORM !== "cursor") {
    openInSystemBrowser(url);
    return;
  }
  writeFileSync(BROWSER_URL_FILE, url, "utf8");
  const state = { status: "recording", url, sessionId, timestamp: Date.now() };
  writeFileSync(RECORDING_STATE_FILE, JSON.stringify(state), "utf8");
  log(`Browser URL written to ${BROWSER_URL_FILE}`);
}
/**
 * Persist the current recording/refinement state for the Cursor status bar.
 * No-op on the CLI platform (nothing reads the state file there).
 */
function updateRecordingState(status: "recording" | "stopped" | "idle" | "refining"): void {
  if (PLATFORM !== "cursor") return;
  const payload = JSON.stringify({ status, timestamp: Date.now() });
  writeFileSync(RECORDING_STATE_FILE, payload, "utf8");
}
/**
 * Open a URL in the user's default system browser (Claude Code / CLI mode).
 *
 * The URL is validated and re-serialized through the WHATWG URL parser before
 * being interpolated into a shell command line, so a malformed or hostile
 * value (e.g. one containing quotes or shell metacharacters) cannot break out
 * of the quoted argument. Only http(s) URLs are opened. Failures are logged,
 * never thrown — the caller's flow must not break if the browser won't open.
 */
function openInSystemBrowser(url: string): void {
  let safeUrl: string;
  try {
    const parsed = new URL(url);
    if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
      log(`Refusing to open non-http(s) URL: ${url}`);
      return;
    }
    // WHATWG serialization percent-encodes quotes and other unsafe characters.
    safeUrl = parsed.toString();
  } catch {
    log(`Refusing to open invalid URL: ${url}`);
    return;
  }
  const platform = process.platform;
  let command: string;
  if (platform === "darwin") {
    command = `open "${safeUrl}"`;
  } else if (platform === "win32") {
    // Empty "" is the window title `start` would otherwise consume.
    command = `start "" "${safeUrl}"`;
  } else {
    command = `xdg-open "${safeUrl}"`;
  }
  exec(command, (error) => {
    if (error) {
      log(`Failed to open browser: ${error.message}`);
      log(`Please open manually: ${url}`);
    } else {
      log(`Opened browser: ${url}`);
    }
  });
}
/**
 * Build the user-facing instructions describing how the live-view browser was
 * (or should be) opened on the current platform.
 *
 * Fix: the Cursor message contained mojibake ("â") where an arrow ("→")
 * was intended.
 */
function getBrowserInstructions(url: string): string {
  if (PLATFORM === "cursor") {
    return `The browser should open automatically in your Cursor panel.
If it doesn't appear, run: Cmd+Shift+P → "Lotus: Open Browser"`;
  } else {
    return `I've opened the browser in a new window.
If it didn't open, here's the URL: ${url}`;
  }
}
// ============================================================================
// REST OF THE MCP IMPLEMENTATION (unchanged from original)
// ============================================================================
/**
 * Refresh the cached skill list from the Lotus API.
 * Returns true on success; on failure logs the error and leaves the cache
 * as-is (it is reset to [] only when the API explicitly returns no skills).
 */
async function loadSkills(): Promise<boolean> {
  const endpoint = `${LOTUS_URL}/api/v1/skills`
  try {
    const payload = await fetchJson<{ skills?: Skill[] }>(endpoint, {
      headers: { "X-API-Key": LOTUS_API_KEY! }
    })
    skills = payload.skills ?? []
    return true
  } catch (e) {
    log(`Failed to load skills: ${(e as Error).message}`)
    return false
  }
}
/**
 * Execute a skill server-side with the given inputs.
 * Network/parse failures are folded into a { success: false, error } result
 * so callers never need a try/catch.
 */
async function executeSkill(skillId: string, inputs: Record<string, unknown>): Promise<{ success: boolean; result?: unknown; error?: string }> {
  const endpoint = `${LOTUS_URL}/api/v1/skills/${skillId}/execute`
  try {
    const response = await fetchJson<{ success: boolean; result?: unknown; error?: string }>(endpoint, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body: JSON.stringify({ inputs })
    })
    return response
  } catch (e) {
    return { success: false, error: (e as Error).message }
  }
}
/**
 * Map a Lotus input type name onto a JSON Schema primitive type.
 * Matching is case-insensitive; unknown or missing types default to "string".
 */
function toJsonSchema(type?: string): { type: string } {
  const normalized = type?.toLowerCase()
  if (normalized === "number" || normalized === "boolean") {
    return { type: normalized }
  }
  return { type: "string" }
}
/**
 * Derive a stable MCP tool name from a skill's display name: lowercase,
 * every non-alphanumeric character replaced with "_", "lotus_" prefix.
 */
function toToolName(name: string): string {
  const slug = name.toLowerCase().replace(/[^a-z0-9]/g, "_")
  return "lotus_" + slug
}
// Start microphone recording
function startMicRecording(): void {
if (!activeRecordingSession) return
try {
const Mic = mic as any
const micInstance = new Mic({
rate: 16000,
channels: 1,
fileType: "wav"
})
const audioStream = micInstance.getAudioStream()
const chunks: Buffer[] = []
audioStream.on("data", (data: Buffer) => {
chunks.push(data)
})
audioStream.on("error", (err: Error) => {
log(`Mic error: ${err.message}`)
})
micInstance.start()
activeRecordingSession.micInstance = micInstance
activeRecordingSession.audioChunks = chunks
log("Microphone recording started")
} catch (err: any) {
log(`Failed to start mic: ${err.message}`)
}
}
/**
 * Stop the microphone stream and return the captured audio as one Buffer.
 * Returns undefined when no mic was running, nothing was captured, or the
 * recorder failed to stop.
 */
function stopMicRecording(): Buffer | undefined {
  const session = activeRecordingSession
  if (!session?.micInstance) return undefined
  try {
    session.micInstance.stop()
    const chunks = session.audioChunks || []
    log("Microphone recording stopped")
    return chunks.length > 0 ? Buffer.concat(chunks) : undefined
  } catch (err: any) {
    log(`Failed to stop mic: ${err.message}`)
    return undefined
  }
}
// API helpers for skill creation flow
// Response of POST /api/v1/record/start: the new session id and the URL of
// the cloud-browser live view the user interacts with.
interface StartRecordingResponse {
session_id: string
live_view_url: string
message: string
}
/**
 * Ask the Lotus backend to start a cloud-browser recording session at `url`.
 * Network failures are returned as an { error } object rather than thrown.
 */
async function apiStartRecording(url: string): Promise<StartRecordingResponse | { error: string }> {
  const endpoint = `${LOTUS_URL}/api/v1/record/start`
  try {
    const response = await fetchJson<StartRecordingResponse>(endpoint, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body: JSON.stringify({ url })
    })
    return response
  } catch (e) {
    return { error: (e as Error).message }
  }
}
// Response of POST /api/v1/record/stop: the backend's analysis of the
// recorded workflow (summary, suggested skill name, narration transcript,
// inferred inputs). `error` is set instead of the other fields on failure.
interface StopRecordingResponse {
workflow_summary?: string
suggested_name?: string
detected_auth?: boolean
transcript?: string
inferred_inputs?: Array<{ name: string; type: string; description: string }>
error?: string
}
/**
 * Stop a recording session server-side, optionally attaching the narration
 * audio (base64 WAV) for transcription. Failures are folded into the
 * response's `error` field rather than thrown.
 */
async function apiStopRecording(sessionId: string, audioBase64?: string): Promise<StopRecordingResponse> {
  const body = JSON.stringify({ session_id: sessionId, audio_base64: audioBase64 })
  try {
    return await fetchJson<StopRecordingResponse>(`${LOTUS_URL}/api/v1/record/stop`, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body
    })
  } catch (e) {
    return { error: (e as Error).message }
  }
}
// Response of POST /api/v1/refine/start: a live-view URL for watching the
// refinement run, or an error.
interface RefineStartResponse {
live_view_url?: string
error?: string
}
/**
 * Kick off server-side refinement of a recorded workflow. `name` and
 * `description` optionally override the backend's suggestions. Failures are
 * folded into the response's `error` field rather than thrown.
 */
async function apiStartRefine(sessionId: string, name?: string, description?: string): Promise<RefineStartResponse> {
  const body = JSON.stringify({ session_id: sessionId, name, description })
  try {
    return await fetchJson<RefineStartResponse>(`${LOTUS_URL}/api/v1/refine/start`, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body
    })
  } catch (e) {
    return { error: (e as Error).message }
  }
}
// Response of GET /api/v1/refine/status: the refinement state machine.
// "question" carries a clarifying question; "complete" carries the new skill.
interface RefineStatusResponse {
status: "refining" | "question" | "complete" | "error"
question?: string
skill?: Skill
error?: string
}
/**
 * Fetch the current refinement status. Network failures are mapped onto a
 * synthetic { status: "error" } result so callers deal with a single shape.
 */
async function apiRefineStatus(sessionId: string): Promise<RefineStatusResponse> {
  const query = encodeURIComponent(sessionId)
  try {
    return await fetchJson<RefineStatusResponse>(`${LOTUS_URL}/api/v1/refine/status?session_id=${query}`, {
      headers: { "X-API-Key": LOTUS_API_KEY! }
    })
  } catch (e) {
    return { status: "error", error: (e as Error).message }
  }
}
// Generic success/error envelope used by refine/answer and session/cancel.
interface ApiResponse {
success?: boolean
error?: string
}
/**
 * Forward the user's answer to the AI's clarifying question. Failures are
 * folded into the response's `error` field rather than thrown.
 */
async function apiRefineAnswer(sessionId: string, answer: string): Promise<ApiResponse> {
  const body = JSON.stringify({ session_id: sessionId, answer })
  try {
    return await fetchJson<ApiResponse>(`${LOTUS_URL}/api/v1/refine/answer`, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body
    })
  } catch (e) {
    return { error: (e as Error).message }
  }
}
/**
 * Cancel a recording or refinement session server-side, releasing its cloud
 * browser. Best-effort: failures come back as { error }, never thrown.
 */
async function apiCancelSession(sessionId: string): Promise<ApiResponse> {
  const body = JSON.stringify({ session_id: sessionId })
  try {
    return await fetchJson<ApiResponse>(`${LOTUS_URL}/api/v1/session/cancel`, {
      method: "POST",
      headers: { "X-API-Key": LOTUS_API_KEY!, "Content-Type": "application/json" },
      body
    })
  } catch (e) {
    return { error: (e as Error).message }
  }
}
async function handleRequest(request: MCPRequest): Promise<void> {
const { id, method, params } = request
switch (method) {
case "initialize": {
if (!LOTUS_API_KEY || !LOTUS_URL) {
sendError(id, -32600, "LOTUS_API_KEY and LOTUS_URL environment variables required")
return
}
if (!await loadSkills()) {
sendError(id, -32600, "Failed to connect to Lotus")
return
}
log(`Initialized (platform: ${PLATFORM})`)
send({
jsonrpc: "2.0",
id,
result: {
protocolVersion: "2025-11-25",
capabilities: { tools: {} },
serverInfo: { name: "lotus-mcp", version: "0.3.0" }
}
})
break
}
case "notifications/initialized":
break
case "tools/list": {
await loadSkills()
// Skill execution tools (existing)
const skillTools = skills.map(skill => ({
name: toToolName(skill.name),
description: skill.description || skill.name,
inputSchema: {
type: "object",
properties: Object.fromEntries(
(skill.inputs || []).map(i => [i.name, { ...toJsonSchema(i.type), description: i.description }])
),
required: (skill.inputs || []).map(i => i.name)
}
}))
// Skill creation tools
const creationTools = [
{
name: "lotus_start_recording",
description: "Start recording a new skill. Opens a cloud browser for the user to perform actions. The user should narrate what they're doing - audio is captured automatically via microphone.",
inputSchema: {
type: "object",
properties: {
url: { type: "string", description: "The URL to start recording from" }
},
required: ["url"]
}
},
{
name: "lotus_stop_recording",
description: "Stop the current recording session. This will stop audio capture, analyze the workflow, and prepare it for refinement. Returns workflow analysis including suggested name and detected inputs.",
inputSchema: {
type: "object",
properties: {},
required: []
}
},
{
name: "lotus_refine_skill",
description: "Refine the recorded workflow into a skill. Opens a live view in the browser where you can watch the AI test and refine the skill. This tool blocks until refinement is complete or the AI has a question - no polling needed.",
inputSchema: {
type: "object",
properties: {
name: { type: "string", description: "Optional name for the skill (overrides suggested name)" },
description: { type: "string", description: "Optional description for the skill" }
},
required: []
}
},
{
name: "lotus_refine_status",
description: "Check the status of the refinement process (for debugging only - lotus_refine_skill now blocks until complete). Returns 'refining', 'question', 'complete', or 'error'.",
inputSchema: {
type: "object",
properties: {},
required: []
}
},
{
name: "lotus_refine_answer",
description: "Answer a clarifying question from the AI during refinement. After sending the answer, this tool blocks until refinement is complete or another question arises - no polling needed.",
inputSchema: {
type: "object",
properties: {
answer: { type: "string", description: "Your answer to the AI's question" }
},
required: ["answer"]
}
},
{
name: "lotus_cancel_session",
description: "Cancel the current recording or refinement session and clean up resources.",
inputSchema: {
type: "object",
properties: {},
required: []
}
}
]
send({ jsonrpc: "2.0", id, result: { tools: [...creationTools, ...skillTools] } })
break
}
case "tools/call": {
const toolName = params?.name as string
const toolArgs = (params?.arguments || {}) as Record<string, unknown>
// Handle skill creation tools
if (toolName === "lotus_start_recording") {
const url = toolArgs.url as string
if (!url) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: URL is required" }],
isError: true
}
})
return
}
// Cancel any existing session
if (activeRecordingSession) {
await apiCancelSession(activeRecordingSession.sessionId)
activeRecordingSession = null
}
if (activeRefineSession) {
await apiCancelSession(activeRefineSession.sessionId)
activeRefineSession = null
}
const result = await apiStartRecording(url)
if ("error" in result) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Error starting recording: ${result.error}` }],
isError: true
}
})
return
}
activeRecordingSession = {
sessionId: result.session_id,
liveViewUrl: result.live_view_url,
micInstance: null,
audioChunks: []
}
// đ¯ PLATFORM-SPECIFIC: Open browser
onBrowserReady(result.live_view_url, result.session_id)
updateRecordingState("recording")
// Start microphone recording
startMicRecording()
const instructions = getBrowserInstructions(result.live_view_url)
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `đŦ Recording started!
${instructions}
**What to do:**
1. Navigate through the website
2. Perform the actions you want to automate
3. Narrate what you're doing out loud (your voice is being recorded)
4. Tell me "I'm done" when finished
I'm watching everything!`
}]
}
})
return
}
if (toolName === "lotus_stop_recording") {
if (!activeRecordingSession) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: No active recording session. Call lotus_start_recording first." }],
isError: true
}
})
return
}
// Stop microphone and get audio
const audioBuffer = stopMicRecording()
const audioBase64 = audioBuffer ? audioBuffer.toString("base64") : undefined
updateRecordingState("stopped")
const result = await apiStopRecording(activeRecordingSession.sessionId, audioBase64)
if (result.error) {
activeRecordingSession = null
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Error stopping recording: ${result.error}` }],
isError: true
}
})
return
}
// Keep session ID for refinement
const sessionId = activeRecordingSession.sessionId
activeRecordingSession = null
activeRefineSession = { sessionId, liveViewUrl: "" }
let responseText = `âšī¸ Recording stopped!\n\n**Summary:** ${result.workflow_summary}\n**Suggested Name:** ${result.suggested_name}\n**Detected Auth:** ${result.detected_auth ? "Yes" : "No"}`
if (result.transcript) {
responseText += `\n**Your Narration:** "${result.transcript}"`
}
if (result.inferred_inputs && result.inferred_inputs.length > 0) {
responseText += `\n**Inferred Inputs:**\n${result.inferred_inputs.map((i: any) => ` - ${i.name} (${i.type}): ${i.description}`).join("\n")}`
}
responseText += `\n\nReady to refine this into a skill. Call lotus_refine_skill to start the refinement process.`
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: responseText }]
}
})
return
}
if (toolName === "lotus_refine_skill") {
if (!activeRefineSession) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: No recorded workflow ready. Call lotus_start_recording and lotus_stop_recording first." }],
isError: true
}
})
return
}
const name = toolArgs.name as string | undefined
const description = toolArgs.description as string | undefined
const result = await apiStartRefine(activeRefineSession.sessionId, name, description)
if (result.error) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Error starting refinement: ${result.error}` }],
isError: true
}
})
return
}
activeRefineSession.liveViewUrl = result.live_view_url || ""
// Open browser automatically (same panel as recording)
if (result.live_view_url) {
onBrowserReady(result.live_view_url, activeRefineSession.sessionId)
}
updateRecordingState("refining")
// Poll until complete, question, or error (internal polling - no LLM polling needed)
const pollInterval = 3000 // 3 seconds
const maxPolls = 200 // ~10 minutes max
for (let i = 0; i < maxPolls; i++) {
await new Promise(r => setTimeout(r, pollInterval))
const status = await apiRefineStatus(activeRefineSession.sessionId)
if (status.status === "complete") {
updateRecordingState("idle")
activeRefineSession = null
await loadSkills()
const skill = status.skill
if (!skill) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Skill created but details not available. Check your skills list." }]
}
})
return
}
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `â
Skill created successfully!\n\n**Name:** ${skill.name}\n**Description:** ${skill.description}\n**Inputs:** ${skill.inputs?.length > 0 ? skill.inputs.map((i) => `${i.name} (${i.type})`).join(", ") : "None"}\n**ID:** ${skill.id}\n\nYou can now use this skill by calling ${toToolName(skill.name)}`
}]
}
})
return
}
if (status.status === "question") {
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `đ¤ The AI has a question:\n\n**${status.question}**\n\nUse lotus_refine_answer to respond.`
}]
}
})
return
}
if (status.status === "error") {
updateRecordingState("idle")
activeRefineSession = null
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Refinement failed: ${status.error}` }],
isError: true
}
})
return
}
// Still refining - continue polling
log(`Refinement in progress... (poll ${i + 1}/${maxPolls})`)
}
// Timed out
updateRecordingState("idle")
activeRefineSession = null
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Refinement timed out after 10 minutes. Please try again." }],
isError: true
}
})
return
}
if (toolName === "lotus_refine_status") {
if (!activeRefineSession) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: No active refinement session." }],
isError: true
}
})
return
}
const result = await apiRefineStatus(activeRefineSession.sessionId)
if (result.error) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Error checking status: ${result.error}` }],
isError: true
}
})
return
}
if (result.status === "complete") {
activeRefineSession = null
updateRecordingState("idle")
// Reload skills to include the new one
await loadSkills()
const skill = result.skill
if (!skill) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Skill created but details not available. Check your skills list." }]
}
})
return
}
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `â
Skill created successfully!\n\n**Name:** ${skill.name}\n**Description:** ${skill.description}\n**Inputs:** ${skill.inputs?.length > 0 ? skill.inputs.map((i) => `${i.name} (${i.type})`).join(", ") : "None"}\n**ID:** ${skill.id}\n\nYou can now use this skill by calling ${toToolName(skill.name)}`
}]
}
})
return
}
if (result.status === "question") {
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `đ¤ The AI has a question:\n\n**${result.question}**\n\nUse lotus_refine_answer to respond.`
}]
}
})
return
}
if (result.status === "error") {
activeRefineSession = null
updateRecordingState("idle")
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Refinement failed: ${result.error}` }],
isError: true
}
})
return
}
// Still refining
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "âŗ Refinement in progress... Call lotus_refine_status again to check." }]
}
})
return
}
if (toolName === "lotus_refine_answer") {
if (!activeRefineSession) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: No active refinement session." }],
isError: true
}
})
return
}
const answer = toolArgs.answer as string
if (!answer) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Error: answer is required" }],
isError: true
}
})
return
}
const answerResult = await apiRefineAnswer(activeRefineSession.sessionId, answer)
if (answerResult.error) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Error sending answer: ${answerResult.error}` }],
isError: true
}
})
return
}
// Poll until complete, another question, or error (auto-continue after answer)
const pollInterval = 3000 // 3 seconds
const maxPolls = 200 // ~10 minutes max
for (let i = 0; i < maxPolls; i++) {
await new Promise(r => setTimeout(r, pollInterval))
const status = await apiRefineStatus(activeRefineSession.sessionId)
if (status.status === "complete") {
updateRecordingState("idle")
activeRefineSession = null
await loadSkills()
const skill = status.skill
if (!skill) {
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Skill created but details not available. Check your skills list." }]
}
})
return
}
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `â
Skill created successfully!\n\n**Name:** ${skill.name}\n**Description:** ${skill.description}\n**Inputs:** ${skill.inputs?.length > 0 ? skill.inputs.map((i) => `${i.name} (${i.type})`).join(", ") : "None"}\n**ID:** ${skill.id}\n\nYou can now use this skill by calling ${toToolName(skill.name)}`
}]
}
})
return
}
if (status.status === "question") {
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: `đ¤ The AI has another question:\n\n**${status.question}**\n\nUse lotus_refine_answer to respond.`
}]
}
})
return
}
if (status.status === "error") {
updateRecordingState("idle")
activeRefineSession = null
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: `Refinement failed: ${status.error}` }],
isError: true
}
})
return
}
// Still refining - continue polling
log(`Refinement in progress after answer... (poll ${i + 1}/${maxPolls})`)
}
// Timed out
updateRecordingState("idle")
activeRefineSession = null
send({
jsonrpc: "2.0",
id,
result: {
content: [{ type: "text", text: "Refinement timed out after 10 minutes. Please try again." }],
isError: true
}
})
return
}
if (toolName === "lotus_cancel_session") {
let cancelled = false
if (activeRecordingSession) {
stopMicRecording()
await apiCancelSession(activeRecordingSession.sessionId)
activeRecordingSession = null
cancelled = true
}
if (activeRefineSession) {
await apiCancelSession(activeRefineSession.sessionId)
activeRefineSession = null
cancelled = true
}
updateRecordingState("idle")
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: cancelled ? "Session cancelled and resources cleaned up." : "No active session to cancel."
}]
}
})
return
}
// Handle existing skill execution tools
const skill = skills.find(s => toToolName(s.name) === toolName)
if (!skill) {
sendError(id, -32602, `Tool not found: ${toolName}`)
return
}
const result = await executeSkill(skill.id, toolArgs)
send({
jsonrpc: "2.0",
id,
result: {
content: [{
type: "text",
text: result.success
? (typeof result.result === "string" ? result.result : JSON.stringify(result.result, null, 2))
: `Error: ${result.error}`
}],
...(result.success ? {} : { isError: true })
}
})
break
}
case "resources/list":
case "resources/read":
case "prompts/list":
case "prompts/get":
send({ jsonrpc: "2.0", id, result: {} })
break
default:
sendError(id, -32601, `Method not found: ${method}`)
}
}
/**
 * Wire up the stdin line-reader and process-lifetime handlers.
 *
 * Requests arrive one JSON object per newline-terminated line; they are
 * queued and handled strictly one at a time (so long-blocking tools like
 * lotus_refine_skill serialize naturally). The previously triplicated
 * session-cleanup logic (stdin end / SIGINT / SIGTERM) now lives in one
 * helper.
 */
async function main(): Promise<void> {
  log(`Starting (platform: ${PLATFORM})`)
  let buffer = ""
  let processing = false
  const queue: string[] = []
  /** Drain queued request lines sequentially; re-entrant calls are no-ops. */
  async function processQueue(): Promise<void> {
    if (processing) return
    processing = true
    while (queue.length > 0) {
      const line = queue.shift()!
      try {
        await handleRequest(JSON.parse(line))
      } catch {
        // Malformed JSON (or an unexpected handler throw) → JSON-RPC parse error.
        sendError(null, -32700, "Parse error")
      }
    }
    processing = false
  }
  /**
   * Best-effort teardown of any active sessions. Cancellation requests are
   * fire-and-forget — the process may exit before they complete, which
   * matches the original behavior.
   */
  function cleanupActiveSessions(): void {
    if (activeRecordingSession) {
      stopMicRecording()
      apiCancelSession(activeRecordingSession.sessionId).catch(() => {})
    }
    if (activeRefineSession) {
      apiCancelSession(activeRefineSession.sessionId).catch(() => {})
    }
  }
  process.stdin.setEncoding("utf8")
  process.stdin.on("data", (chunk: string) => {
    buffer += chunk
    const lines = buffer.split("\n")
    // Last element is a partial line (or "") — keep it for the next chunk.
    buffer = lines.pop() || ""
    for (const line of lines) {
      if (line.trim()) queue.push(line)
    }
    void processQueue() // fire-and-forget; the queue serializes the work
  })
  process.stdin.on("end", () => {
    cleanupActiveSessions()
    if (buffer.trim()) {
      // Flush a final unterminated line before exiting.
      queue.push(buffer)
      processQueue().then(() => process.exit(0))
    } else {
      process.exit(0)
    }
  })
  // Handle process signals for cleanup.
  const shutdown = (): void => {
    cleanupActiveSessions()
    process.exit(0)
  }
  process.on("SIGINT", shutdown)
  process.on("SIGTERM", shutdown)
}
main()