import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { z } from 'zod';
import { driftClient } from '../services/drift-client.js';
/**
 * Registers conversation-context tools on the given MCP server.
 *
 * Tools registered:
 * - `driftos_get_context`: fetch assembled context (messages + accumulated
 *   facts) for a conversation branch via `driftClient.getContext`.
 * - `driftos_build_prompt`: build a ready-to-use LLM prompt (system prompt +
 *   messages) for a branch via `driftClient.buildPrompt`.
 *
 * @param server - MCP server instance to register the tools on.
 */
export function registerContextTools(server: McpServer): void {
  // Wrap a successful service result as an MCP text-content payload
  // (pretty-printed JSON, shared by both tool handlers).
  const okResult = (result: unknown) => ({
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(result, null, 2),
      },
    ],
  });

  // Wrap a failure as an MCP error payload; narrows the unknown catch value
  // to extract a message. `prefix` supplies the tool-specific error text.
  const errorResult = (prefix: string, error: unknown) => {
    const message = error instanceof Error ? error.message : 'Unknown error';
    return {
      content: [
        {
          type: 'text' as const,
          text: `${prefix}: ${message}`,
        },
      ],
      isError: true,
    };
  };

  server.registerTool(
    'driftos_get_context',
    {
      title: 'Get Conversation Context',
      description: `Get assembled context for a conversation branch, including messages and facts from related branches.
This is what you pass to an LLM instead of the entire conversation history. Returns only the relevant messages from the current branch plus accumulated facts.
Args:
- branch_id (string): The branch ID to get context for (returned from route_message)
Returns:
{
"branchId": string,
"branchTopic": string,
"messages": [
{ "role": "user" | "assistant", "content": string }
],
"allFacts": [
{
"branchTopic": string,
"isCurrent": boolean,
"facts": [{ "key": string, "value": string, "confidence": number }]
}
]
}
Use this to build focused LLM context windows instead of dumping entire conversation history.`,
      // NOTE: the MCP SDK expects a raw Zod shape here (it wraps the shape in
      // z.object() internally); passing a pre-built ZodObject such as
      // z.object({...}).strict() does not satisfy the ZodRawShape contract and
      // breaks argument parsing/typing of `params`.
      inputSchema: {
        branch_id: z.string().min(1).describe('Branch ID to get context for'),
      },
      annotations: {
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false,
      },
    },
    async (params) => {
      try {
        const result = await driftClient.getContext(params.branch_id);
        return okResult(result);
      } catch (error) {
        return errorResult('Error getting context', error);
      }
    }
  );

  server.registerTool(
    'driftos_build_prompt',
    {
      title: 'Build LLM Prompt',
      description: `Build a ready-to-use prompt for LLM calls with context and facts.
Args:
- branch_id (string): The branch ID to build prompt for
- system_prompt (string, optional): Custom system prompt prefix
Returns:
{
"system": string, // Full system prompt with topic and facts
"messages": [{ "role": string, "content": string }] // Conversation messages
}
Use this to get a complete prompt ready for OpenAI/Anthropic/etc API calls.`,
      // Raw Zod shape, per the SDK's registerTool contract (see note above on
      // driftos_get_context).
      inputSchema: {
        branch_id: z.string().min(1).describe('Branch ID to build prompt for'),
        system_prompt: z.string().optional().describe('Custom system prompt prefix'),
      },
      annotations: {
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false,
      },
    },
    async (params) => {
      try {
        const result = await driftClient.buildPrompt(
          params.branch_id,
          params.system_prompt
        );
        return okResult(result);
      } catch (error) {
        return errorResult('Error building prompt', error);
      }
    }
  );
}