Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP
by niko91i
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ErrorCode,
ListToolsRequestSchema,
McpError,
} from "@modelcontextprotocol/sdk/types.js";
import { OpenAI } from "openai";
import dotenv from "dotenv";
import * as os from "os";
import * as path from "path";
import * as fs from "fs/promises";
import { v4 as uuidv4 } from "uuid";
// Load environment variables
dotenv.config();
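// Example .env consumed by dotenv above (values are placeholders;
// OPENROUTER_API_KEY is required by the OpenAI client further down,
// DEEPSEEK_MODEL is optional and falls back to the default declared below):
//   OPENROUTER_API_KEY=sk-or-v1-xxxxxxxx
//   DEEPSEEK_MODEL=deepseek/deepseek-chat-v3-0324:free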
// Debug logging
const DEBUG = true;
const log = (...args: any[]) => {
if (DEBUG) {
console.error("[DEEPSEEK-CLAUDE MCP]", ...args);
}
};
// Constants - use only the DeepSeek model
const DEEPSEEK_MODEL =
process.env.DEEPSEEK_MODEL || "deepseek/deepseek-chat-v3-0324:free";
// Claude is no longer used at all
// const CLAUDE_MODEL = "anthropic/claude-3.5-sonnet:beta";
// Constants for the status-polling mechanism
const INITIAL_STATUS_CHECK_DELAY_MS = 5000; // 5 seconds before the first check
const MAX_STATUS_CHECK_DELAY_MS = 60000; // at most 1 minute between checks
const STATUS_CHECK_BACKOFF_FACTOR = 1.5; // multiplier applied to the delay
const MAX_STATUS_CHECK_ATTEMPTS = 20; // maximum number of attempts (avoids an infinite loop)
const TASK_TIMEOUT_MS = 10 * 60 * 1000; // 10 minutes maximum per task
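// Worked example of the schedule these constants produce (client-side waits
// between status checks): 5s, 7.5s, 11.25s, ~16.9s, ~25.3s, ~38s, ~57s, then
// capped at 60s. A client that honors every suggested delay spends roughly
// 15-16 minutes over 20 checks, so TASK_TIMEOUT_MS (10 minutes) normally
// fires before the attempt cap is reached.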
interface ConversationEntry {
timestamp: number;
prompt: string;
reasoning: string;
response: string;
model: string;
}
interface ConversationContext {
entries: ConversationEntry[];
maxEntries: number;
}
interface GenerateResponseArgs {
prompt: string;
showReasoning?: boolean;
clearContext?: boolean;
includeHistory?: boolean;
}
interface CheckResponseStatusArgs {
taskId: string;
}
interface TaskStatus {
status: "pending" | "reasoning" | "responding" | "complete" | "error";
prompt: string;
showReasoning?: boolean;
reasoning?: string;
response?: string;
error?: string;
timestamp: number;
  // Properties used to manage polling
lastChecked?: number;
nextCheckDelay?: number;
checkAttempts?: number;
}
const isValidCheckResponseStatusArgs = (
args: any
): args is CheckResponseStatusArgs =>
typeof args === "object" && args !== null && typeof args.taskId === "string";
interface ClaudeMessage {
role: "user" | "assistant";
content: string | { type: string; text: string }[];
}
interface UiMessage {
ts: number;
type: string;
say?: string;
ask?: string;
text: string;
conversationHistoryIndex: number;
}
const isValidGenerateResponseArgs = (args: any): args is GenerateResponseArgs =>
typeof args === "object" &&
args !== null &&
typeof args.prompt === "string" &&
(args.showReasoning === undefined ||
typeof args.showReasoning === "boolean") &&
(args.clearContext === undefined || typeof args.clearContext === "boolean") &&
(args.includeHistory === undefined ||
typeof args.includeHistory === "boolean");
function getClaudePath(): string {
const homeDir = os.homedir();
switch (process.platform) {
case "win32":
return path.join(
homeDir,
"AppData",
"Roaming",
"Code",
"User",
"globalStorage",
"saoudrizwan.claude-dev",
"tasks"
);
case "darwin":
return path.join(
homeDir,
"Library",
"Application Support",
"Code",
"User",
"globalStorage",
"saoudrizwan.claude-dev",
"tasks"
);
default: // linux
return path.join(
homeDir,
".config",
"Code",
"User",
"globalStorage",
"saoudrizwan.claude-dev",
"tasks"
);
}
}
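// For illustration: on Linux this resolves to
// ~/.config/Code/User/globalStorage/saoudrizwan.claude-dev/tasks, the
// directory where the Cline (claude-dev) VS Code extension keeps one
// subfolder per task.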
async function findActiveConversation(): Promise<ClaudeMessage[] | null> {
try {
const tasksPath = getClaudePath();
const dirs = await fs.readdir(tasksPath);
// Get modification time for each api_conversation_history.json
const dirStats = await Promise.all(
dirs.map(async (dir) => {
try {
const historyPath = path.join(
tasksPath,
dir,
"api_conversation_history.json"
);
const stats = await fs.stat(historyPath);
const uiPath = path.join(tasksPath, dir, "ui_messages.json");
const uiContent = await fs.readFile(uiPath, "utf8");
const uiMessages: UiMessage[] = JSON.parse(uiContent);
const hasEnded = uiMessages.some(
(m) => m.type === "conversation_ended"
);
return {
dir,
mtime: stats.mtime.getTime(),
hasEnded,
};
} catch (error) {
log("Error checking folder:", dir, error);
return null;
}
})
);
// Filter out errors and ended conversations, then sort by modification time
const sortedDirs = dirStats
.filter(
(stat): stat is NonNullable<typeof stat> =>
stat !== null && !stat.hasEnded
)
.sort((a, b) => b.mtime - a.mtime);
// Use most recently modified active conversation
const latest = sortedDirs[0]?.dir;
if (!latest) {
log("No active conversations found");
return null;
}
const historyPath = path.join(
tasksPath,
latest,
"api_conversation_history.json"
);
const history = await fs.readFile(historyPath, "utf8");
return JSON.parse(history);
} catch (error) {
log("Error finding active conversation:", error);
return null;
}
}
function formatHistoryForModel(
history: ClaudeMessage[],
isDeepSeek: boolean
): string {
  const maxLength = isDeepSeek ? 50000 : 600000; // 50k chars for the reasoning pass, 600k for the final-response pass
const formattedMessages = [];
let totalLength = 0;
// Process messages in reverse chronological order to get most recent first
for (let i = history.length - 1; i >= 0; i--) {
const msg = history[i];
const content = Array.isArray(msg.content)
? msg.content.map((c) => c.text).join("\n")
: msg.content;
const formattedMsg = `${
msg.role === "user" ? "Human" : "Assistant"
}: ${content}`;
const msgLength = formattedMsg.length;
// Stop adding messages if we'd exceed the limit
if (totalLength + msgLength > maxLength) {
break;
}
formattedMessages.push(formattedMsg); // Add most recent messages first
totalLength += msgLength;
}
// Reverse to get chronological order
return formattedMessages.reverse().join("\n\n");
}
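// Example (hypothetical sizes): with a 50k-char budget and a 70k-char history,
// the loop above keeps the most recent messages until the next one would
// overflow the budget, then the final reverse() restores chronological order
// so the model still reads the kept slice oldest-first.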
class DeepseekClaudeServer {
private server: Server;
private openrouterClient: OpenAI;
private context: ConversationContext = {
entries: [],
maxEntries: 10,
};
private activeTasks: Map<string, TaskStatus> = new Map();
constructor() {
log("Initializing API clients...");
// Initialize OpenRouter client
this.openrouterClient = new OpenAI({
baseURL: "https://openrouter.ai/api/v1",
apiKey: process.env.OPENROUTER_API_KEY,
});
log("OpenRouter client initialized");
// Initialize MCP server
this.server = new Server(
{
name: "deepseek-thinking-claude-mcp",
version: "0.1.0",
},
{
capabilities: {
tools: {},
},
}
);
this.setupToolHandlers();
// Error handling
this.server.onerror = (error) => console.error("[MCP Error]", error);
process.on("SIGINT", async () => {
await this.server.close();
process.exit(0);
});
}
private addToContext(entry: ConversationEntry) {
    // Record the entry with DEEPSEEK_MODEL instead of CLAUDE_MODEL
const entryWithUpdatedModel = {
...entry,
model: DEEPSEEK_MODEL,
};
this.context.entries.push(entryWithUpdatedModel);
if (this.context.entries.length > this.context.maxEntries) {
this.context.entries.shift(); // Remove oldest
}
}
private formatContextForPrompt(): string {
return this.context.entries
.map(
(entry) =>
`Question: ${entry.prompt}\nReasoning: ${entry.reasoning}\nAnswer: ${entry.response}`
)
.join("\n\n");
}
private setupToolHandlers() {
this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: [
{
name: "generate_response",
          description:
            "Generate a response using DeepSeek for both the reasoning and the final response, through OpenRouter.",
inputSchema: {
type: "object",
properties: {
prompt: {
type: "string",
description: "The user's input prompt",
},
showReasoning: {
type: "boolean",
description: "Whether to include reasoning in response",
default: false,
},
clearContext: {
type: "boolean",
description: "Clear conversation history before this request",
default: false,
},
includeHistory: {
type: "boolean",
description: "Include Cline conversation history for context",
default: true,
},
},
required: ["prompt"],
},
},
{
name: "check_response_status",
description: "Check the status of a response generation task",
inputSchema: {
type: "object",
properties: {
taskId: {
type: "string",
description: "The task ID returned by generate_response",
},
},
required: ["taskId"],
},
},
],
}));
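    // Example round trip (illustrative payloads): a client first calls
    //   generate_response {"prompt": "Explain X", "showReasoning": true}
    // and immediately receives {"taskId": "<uuid>", "suggestedWaitTime": 5},
    // then repeatedly calls
    //   check_response_status {"taskId": "<uuid>"}
    // until status is "complete" (or "error"); the completed payload carries
    // the response and, when showReasoning was set, the reasoning as well.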
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
if (request.params.name === "generate_response") {
if (!isValidGenerateResponseArgs(request.params.arguments)) {
throw new McpError(
ErrorCode.InvalidParams,
"Invalid generate_response arguments"
);
}
const taskId = uuidv4();
const { prompt, showReasoning, clearContext, includeHistory } =
request.params.arguments;
        // Initialize the task status with the polling-tracking properties
this.activeTasks.set(taskId, {
status: "pending",
prompt,
showReasoning,
timestamp: Date.now(),
lastChecked: Date.now(),
nextCheckDelay: INITIAL_STATUS_CHECK_DELAY_MS,
checkAttempts: 0
});
// Start processing in background
this.processTask(taskId, clearContext, includeHistory).catch(
(error) => {
log("Error processing task:", error);
this.activeTasks.set(taskId, {
...this.activeTasks.get(taskId)!,
status: "error",
error: error.message,
});
}
);
// Return task ID immediately
return {
content: [
{
type: "text",
text: JSON.stringify({
taskId,
                suggestedWaitTime: Math.round(INITIAL_STATUS_CHECK_DELAY_MS / 1000) // suggested wait time in seconds
}),
},
],
};
} else if (request.params.name === "check_response_status") {
if (!isValidCheckResponseStatusArgs(request.params.arguments)) {
throw new McpError(
ErrorCode.InvalidParams,
"Invalid check_response_status arguments"
);
}
const taskId = request.params.arguments.taskId;
const task = this.activeTasks.get(taskId);
if (!task) {
throw new McpError(
ErrorCode.InvalidRequest,
`No task found with ID: ${taskId}`
);
}
        // Check whether the task has timed out
const currentTime = Date.now();
if (currentTime - task.timestamp > TASK_TIMEOUT_MS) {
const updatedTask = {
...task,
status: "error" as const,
            error: `Task timed out after ${TASK_TIMEOUT_MS / 60000} minutes`
};
this.activeTasks.set(taskId, updatedTask);
return {
content: [
{
type: "text",
text: JSON.stringify({
status: updatedTask.status,
reasoning: updatedTask.showReasoning ? updatedTask.reasoning : undefined,
response: undefined,
error: updatedTask.error,
timeoutAfter: TASK_TIMEOUT_MS / 60000
})
}
]
};
}
        // Update the tracking properties
        const checkAttempts = (task.checkAttempts || 0) + 1;
        // Check whether the maximum number of attempts has been reached
if (checkAttempts > MAX_STATUS_CHECK_ATTEMPTS && task.status !== "complete" && task.status !== "error") {
const updatedTask = {
...task,
status: "error" as const,
            error: `Maximum number of status checks reached (${MAX_STATUS_CHECK_ATTEMPTS})`,
checkAttempts
};
this.activeTasks.set(taskId, updatedTask);
return {
content: [
{
type: "text",
text: JSON.stringify({
status: updatedTask.status,
reasoning: updatedTask.showReasoning ? updatedTask.reasoning : undefined,
response: undefined,
error: updatedTask.error,
maxAttempts: MAX_STATUS_CHECK_ATTEMPTS
})
}
]
};
}
        // Compute the delay before the next check (exponential backoff)
let nextCheckDelay = task.nextCheckDelay || INITIAL_STATUS_CHECK_DELAY_MS;
nextCheckDelay = Math.min(nextCheckDelay * STATUS_CHECK_BACKOFF_FACTOR, MAX_STATUS_CHECK_DELAY_MS);
        // Update the task's tracking fields
const updatedTask = {
...task,
lastChecked: currentTime,
nextCheckDelay,
checkAttempts
};
this.activeTasks.set(taskId, updatedTask);
return {
content: [
{
type: "text",
text: JSON.stringify({
status: task.status,
reasoning: task.showReasoning ? task.reasoning : undefined,
response: task.status === "complete" ? task.response : undefined,
error: task.error,
                nextCheckIn: Math.round(nextCheckDelay / 1000), // suggested wait before the next check, in seconds
checkAttempts,
                elapsedTime: Math.round((currentTime - task.timestamp) / 1000) // elapsed time in seconds
}),
},
],
};
} else {
throw new McpError(
ErrorCode.MethodNotFound,
`Unknown tool: ${request.params.name}`
);
}
});
}
private async processTask(
taskId: string,
clearContext?: boolean,
includeHistory?: boolean
): Promise<void> {
const task = this.activeTasks.get(taskId);
if (!task) {
throw new Error(`No task found with ID: ${taskId}`);
}
try {
if (clearContext) {
this.context.entries = [];
}
      // Update status to reasoning; re-read the map entry so tracking fields
      // written by concurrent check_response_status calls are not clobbered
      this.activeTasks.set(taskId, {
        ...this.activeTasks.get(taskId)!,
        status: "reasoning",
      });
// Get Cline conversation history if requested
let history: ClaudeMessage[] | null = null;
if (includeHistory !== false) {
history = await findActiveConversation();
}
// Get DeepSeek reasoning with limited history
const reasoningHistory = history
? formatHistoryForModel(history, true)
: "";
const reasoningPrompt = reasoningHistory
? `${reasoningHistory}\n\nNew question: ${task.prompt}`
: task.prompt;
const reasoning = await this.getDeepseekReasoning(reasoningPrompt);
      // Update status with reasoning (again re-reading the current map entry)
      this.activeTasks.set(taskId, {
        ...this.activeTasks.get(taskId)!,
        status: "responding",
        reasoning,
      });
// Get final response with full history
const responseHistory = history
? formatHistoryForModel(history, false)
: "";
const fullPrompt = responseHistory
? `${responseHistory}\n\nCurrent task: ${task.prompt}`
: task.prompt;
const response = await this.getFinalResponse(fullPrompt, reasoning);
// Add to context after successful response
this.addToContext({
timestamp: Date.now(),
prompt: task.prompt,
reasoning,
response,
        model: DEEPSEEK_MODEL, // use DEEPSEEK_MODEL instead of CLAUDE_MODEL
});
      // Update status to complete
      this.activeTasks.set(taskId, {
        ...this.activeTasks.get(taskId)!,
        status: "complete",
        reasoning,
        response,
        timestamp: Date.now(),
      });
} catch (error) {
      // Update status to error
      this.activeTasks.set(taskId, {
        ...this.activeTasks.get(taskId)!,
        status: "error",
        error: error instanceof Error ? error.message : "Unknown error",
        timestamp: Date.now(),
      });
throw error;
}
}
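  // Task lifecycle implemented above:
  //   pending -> reasoning (first DeepSeek pass)
  //           -> responding (second DeepSeek pass)
  //           -> complete
  // with "error" reachable from any step, and also set externally by the
  // timeout and attempt-cap checks in check_response_status.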
private async getDeepseekReasoning(prompt: string): Promise<string> {
const contextPrompt =
this.context.entries.length > 0
? `Previous conversation:\n${this.formatContextForPrompt()}\n\nNew question: ${prompt}`
: prompt;
try {
      // Add an explicit instruction so the model lays out its reasoning
      const requestPrompt = `Analyze the following question in detail before answering. Think step by step and lay out your full reasoning.\n\n${contextPrompt}`;
      // Get reasoning from DeepSeek (without the include_reasoning parameter)
const response = await this.openrouterClient.chat.completions.create({
model: DEEPSEEK_MODEL,
messages: [
{
role: "user",
content: requestPrompt,
},
],
temperature: 0.7,
top_p: 1,
});
      // Use the response content directly as the reasoning
if (
!response.choices ||
!response.choices[0] ||
!response.choices[0].message ||
!response.choices[0].message.content
) {
throw new Error("Réponse vide de DeepSeek");
}
return response.choices[0].message.content;
} catch (error) {
log("Error in getDeepseekReasoning:", error);
throw error;
}
}
private async getFinalResponse(
prompt: string,
reasoning: string
): Promise<string> {
try {
      // Instead of calling Claude, DeepSeek is also used for the final response
const response = await this.openrouterClient.chat.completions.create({
        model: DEEPSEEK_MODEL, // use DeepSeek here as well
messages: [
{
role: "user",
            content: `${prompt}\n\nHere is my preliminary analysis of this question: ${reasoning}\nNow generate a complete, detailed answer based on this analysis.`,
},
],
temperature: 0.7,
top_p: 1,
      }); // no non-standard parameters are passed, so no cast is needed
return (
response.choices[0].message.content || "Error: No response content"
);
} catch (error) {
log("Error in getFinalResponse:", error);
throw error;
}
}
async run() {
const transport = new StdioServerTransport();
await this.server.connect(transport);
console.error("DeepSeek-Claude MCP server running on stdio");
}
}
const server = new DeepseekClaudeServer();
server.run().catch(console.error);
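// A minimal Cline MCP settings entry for this server might look like the
// following sketch (the file path and key are placeholders, and the exact
// settings-file location depends on your Cline installation):
//
// {
//   "mcpServers": {
//     "deepseek-thinking-claude-mcp": {
//       "command": "node",
//       "args": ["/path/to/build/index.js"],
//       "env": { "OPENROUTER_API_KEY": "sk-or-v1-xxxxxxxx" }
//     }
//   }
// }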