// src/services/mcpService.js
// API service for connecting React app to MCP server
// Base URL of the MCP backend. Hard-coded for local development;
// NOTE(review): consider sourcing from build-time env config for deploys — TODO confirm.
const MCP_BASE_URL = 'http://localhost:8000';
/**
 * Client for the MCP HTTP API.
 *
 * Wraps `fetch` with consistent error enrichment (errors thrown from here
 * carry `status`, `details`, `endpoint`, and `url` properties) and exposes
 * helpers for chat, streaming chat (SSE), memory, and tool endpoints.
 */
class MCPService {
  constructor() {
    this.baseURL = MCP_BASE_URL;
  }

  /**
   * Perform a JSON API call against the MCP server.
   *
   * @param {string} endpoint - Path starting with '/', appended to baseURL.
   * @param {object} [options] - Extra `fetch` options. `options.headers` are
   *   merged on top of the default 'Content-Type: application/json'.
   * @returns {Promise<any>} Parsed JSON response body.
   * @throws {Error} Enriched error with `status`, `details`, `endpoint`, `url`
   *   on HTTP failure, or `originalError`, `endpoint`, `url` on network failure.
   */
  async apiCall(endpoint, options = {}) {
    const url = `${this.baseURL}${endpoint}`;
    // Spread `options` FIRST so a caller-supplied `headers` object cannot
    // clobber the merged headers below (which must keep Content-Type).
    const fetchOptions = {
      ...options,
      headers: {
        'Content-Type': 'application/json',
        ...options.headers,
      },
    };
    try {
      console.log(`MCP API Call: ${options.method || 'GET'} ${url}`);
      const response = await fetch(url, fetchOptions);
      if (!response.ok) {
        const errorText = await response.text();
        let errorMessage = `HTTP ${response.status}`;
        let errorDetails = {};
        // Try to parse detailed error response
        try {
          const errorData = JSON.parse(errorText);
          if (errorData.error) {
            // Enhanced error format from server
            errorMessage = errorData.error.message || errorData.error;
            errorDetails = errorData.error.details || {};
            // Add helpful context
            if (errorData.error.type) {
              errorDetails.type = errorData.error.type;
            }
          } else if (errorData.detail) {
            // FastAPI standard error format
            errorMessage = errorData.detail;
          }
        } catch (parseError) {
          // If JSON parsing fails, use raw error text
          errorMessage += `: ${errorText}`;
        }
        // Create enhanced error object
        const enhancedError = new Error(errorMessage);
        enhancedError.status = response.status;
        enhancedError.details = errorDetails;
        enhancedError.endpoint = endpoint;
        enhancedError.url = url;
        throw enhancedError;
      }
      const data = await response.json();
      console.log('MCP API Response:', data);
      return data;
    } catch (error) {
      console.error('MCP API Error:', error);
      // If it's already our enhanced HTTP error, re-throw it unchanged.
      if (error.status !== undefined) {
        throw error;
      }
      // Network or other errors
      let enhancedMessage = 'API call failed';
      if (error.name === 'TypeError' && error.message.includes('fetch')) {
        enhancedMessage = 'Cannot connect to server. Please check if the MCP server is running.';
      } else if (error.message.includes('timeout')) {
        enhancedMessage = 'Request timed out. Please try again.';
      } else {
        enhancedMessage = `API call failed: ${error.message}`;
      }
      const enhancedError = new Error(enhancedMessage);
      enhancedError.originalError = error;
      enhancedError.endpoint = endpoint;
      enhancedError.url = url;
      throw enhancedError;
    }
  }

  /** Health check against the server's /health endpoint. */
  async checkHealth() {
    return await this.apiCall('/health');
  }

  /**
   * Chat with LLM (simple, no memory).
   *
   * @param {string} message - User message.
   * @param {string} [model] - Model identifier.
   * @param {object} [options] - `temperature`, `max_tokens`.
   * @returns {Promise<any>} Server response.
   */
  async chat(message, model = 'mistral:latest', options = {}) {
    return await this.apiCall('/api/chat', {
      method: 'POST',
      body: JSON.stringify({
        message,
        model,
        // `??` (not `||`) so an explicit 0 temperature is honored.
        temperature: options.temperature ?? 0.7,
        max_tokens: options.max_tokens ?? 1000,
      }),
    });
  }

  /**
   * Memory-aware conversation chat.
   *
   * @param {string} message - User message.
   * @param {string} conversationId - Conversation to append to.
   * @param {string} [model] - Model identifier.
   * @param {object} [options] - `include_context` (default true),
   *   `context_limit`, `temperature`, `max_tokens`.
   * @returns {Promise<any>} Server response.
   */
  async conversationChat(message, conversationId, model = 'mistral:latest', options = {}) {
    return await this.apiCall('/api/conversation/chat', {
      method: 'POST',
      body: JSON.stringify({
        message,
        conversation_id: conversationId,
        model,
        include_context: options.include_context !== false,
        // `??` so explicit 0 values are sent as-is.
        context_limit: options.context_limit ?? 10,
        temperature: options.temperature ?? 0.7,
        max_tokens: options.max_tokens ?? 1000,
      }),
    });
  }

  /**
   * Stream chat with LLM (conversation-aware).
   *
   * @param {string} message - Sent as `question` per the server's contract.
   * @param {string} [model] - Used only to label the aggregated result.
   * @param {object} [options] - `session_id`, `database`, `provider`.
   * @param {?function} [onChunk] - Called with (content, rawEvent) per chunk.
   * @returns {Promise<{response: string, model: string, provider: string}>}
   */
  async chatStream(message, model = 'mistral:latest', options = {}, onChunk = null) {
    const url = `${this.baseURL}/api/conversation/chat/stream`;
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        question: message, // Changed from 'message' to 'question'
        session_id: options.session_id || 'default_session',
        database: options.database || 'db3',
        provider: options.provider || 'ollama',
      }),
    });
    return this._handleStreamResponse(response, model, onChunk);
  }

  /**
   * Simple stream chat (no memory/conversation).
   *
   * @param {string} message - User message (sent as `message`).
   * @param {string} [model] - Model identifier.
   * @param {object} [options] - `temperature`, `max_tokens`.
   * @param {?function} [onChunk] - Called with (content, rawEvent) per chunk.
   * @returns {Promise<{response: string, model: string, provider: string}>}
   */
  async simpleChatStream(message, model = 'mistral:latest', options = {}, onChunk = null) {
    const url = `${this.baseURL}/api/chat/stream`;
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        message: message, // Uses 'message' field
        model,
        temperature: options.temperature ?? 0.7,
        max_tokens: options.max_tokens ?? 1000,
      }),
    });
    return this._handleStreamResponse(response, model, onChunk);
  }

  /**
   * Consume an SSE-style streaming response ("data: {json}\n" lines).
   *
   * Buffers partial lines across network chunks so a JSON event split
   * between reads is not dropped, and propagates server-reported errors
   * (`data.error`) instead of silently skipping them.
   *
   * @param {Response} response - The raw fetch Response.
   * @param {string} model - Fallback model label for the result.
   * @param {?function} onChunk - Per-chunk callback (content, rawEvent).
   * @returns {Promise<{response: string, model: string, provider: string}>}
   * @throws {Error} Enriched error on HTTP failure or server stream error.
   */
  async _handleStreamResponse(response, model, onChunk) {
    if (!response.ok) {
      const errorText = await response.text();
      let errorMessage = `HTTP ${response.status}`;
      let errorDetails = {};
      // Try to parse detailed error response
      try {
        const errorData = JSON.parse(errorText);
        if (errorData.error) {
          errorMessage = errorData.error.message || errorData.error;
          errorDetails = errorData.error.details || {};
        } else if (errorData.detail) {
          errorMessage = errorData.detail;
        }
      } catch (parseError) {
        errorMessage += `: ${errorText}`;
      }
      // Create enhanced error for streaming
      const enhancedError = new Error(errorMessage);
      enhancedError.status = response.status;
      enhancedError.details = errorDetails;
      enhancedError.isStreamingError = true;
      throw enhancedError;
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let fullResponse = '';
    let buffer = '';
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        // Keep the trailing partial line for the next chunk.
        buffer = lines.pop();
        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          let data;
          try {
            data = JSON.parse(line.slice(6));
          } catch (e) {
            // Skip invalid JSON lines
            continue;
          }
          // Checked OUTSIDE the parse try/catch so server errors are
          // not swallowed by the malformed-JSON skip above.
          if (data.error) {
            throw new Error(data.error);
          }
          if (data.content) {
            fullResponse += data.content;
            if (onChunk) {
              onChunk(data.content, data);
            }
          }
          if (data.done) {
            return {
              response: fullResponse,
              model: data.model || model,
              provider: data.provider || 'unknown',
            };
          }
        }
      }
      return {
        response: fullResponse,
        model: model,
        provider: 'unknown',
      };
    } finally {
      reader.releaseLock();
    }
  }

  /** Echo test (for debugging). */
  async echo(text) {
    return await this.apiCall('/api/echo', {
      method: 'POST',
      body: JSON.stringify({ text }),
    });
  }

  /**
   * Store a memory item for a conversation.
   *
   * @param {string} conversationId
   * @param {string} content
   * @param {object} [metadata]
   * @param {string} [role]
   * @param {number} [importance]
   */
  async storeMemory(conversationId, content, metadata = {}, role = 'user', importance = 1.0) {
    return await this.apiCall('/api/memory/store', {
      method: 'POST',
      body: JSON.stringify({
        conversation_id: conversationId,
        content,
        metadata,
        role,
        importance,
      }),
    });
  }

  /**
   * Fetch stored memory for a conversation.
   *
   * @param {string} conversationId
   * @param {?number} [limit] - null means no limit (server default).
   * @param {number} [minImportance]
   */
  async getMemory(conversationId, limit = null, minImportance = 0.0) {
    return await this.apiCall('/api/memory/get', {
      method: 'POST',
      body: JSON.stringify({
        conversation_id: conversationId,
        limit,
        min_importance: minImportance,
      }),
    });
  }

  /**
   * Search memory by free-text query.
   *
   * @param {string} query
   * @param {?string} [conversationId] - Restrict to one conversation if set.
   * @param {number} [limit]
   */
  async searchMemory(query, conversationId = null, limit = 10) {
    const params = new URLSearchParams({ query, limit: limit.toString() });
    if (conversationId) {
      params.append('conversation_id', conversationId);
    }
    return await this.apiCall(`/api/memory/search?${params}`);
  }

  /** Get available models. */
  async getModels() {
    return await this.apiCall('/api/models');
  }

  /** Get all MCP capabilities and tools. */
  async getCapabilities() {
    return await this.apiCall('/api/capabilities');
  }

  /**
   * Call a specific MCP tool by name.
   *
   * @param {string} toolName
   * @param {object} [parameters]
   */
  async callTool(toolName, parameters = {}) {
    return await this.apiCall('/api/tools/call', {
      method: 'POST',
      body: JSON.stringify({
        tool_name: toolName,
        parameters,
      }),
    });
  }
}
// Export a shared singleton so every consumer talks through one client
// (named before export to avoid an anonymous default export).
const mcpService = new MCPService();
export default mcpService;