import dotenv from 'dotenv';
import { GoogleGenerativeAI } from '@google/generative-ai';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
import readline from 'readline';
import path from 'path';
dotenv.config();

// Fail fast with an actionable message instead of passing `undefined`
// (via a non-null assertion) into the SDK, which would only surface as
// a confusing API error much later at request time.
const apiKey = process.env.GEMINI_API_KEY;
if (!apiKey) {
  throw new Error('GEMINI_API_KEY environment variable is not set');
}

const genAI = new GoogleGenerativeAI(apiKey);

// Single shared model handle; this generationConfig applies to every
// chat session started from it.
const model = genAI.getGenerativeModel({
  model: 'gemini-2.5-pro',
  generationConfig: {
    temperature: 0.7,      // moderate creativity for conversational replies
    maxOutputTokens: 2048,
    topK: 40,
    topP: 0.95,
  }
});
/**
 * Interactive REPL: spawns the local MCP server over stdio, then loops
 * reading user input and lets Gemini choose between a plain-text reply
 * and one or more MCP tool calls. Type "exit" to quit.
 */
async function main(): Promise<void> {
  const client = new Client({ name: 'org-client', version: '1.0.0' });
  const transport = new StdioClientTransport({
    command: process.execPath, // the node binary currently running this script
    args: [path.resolve(process.cwd(), 'dist/server/index.js')],
    env: process.env as Record<string, string>
  });
  await client.connect(transport);
  console.log('Connected to server!');

  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

  try {
    // The server's tool list is fixed for the life of the connection,
    // so fetch and translate it once instead of on every loop turn.
    const tools = await client.listTools();
    const functionDeclarations = tools.tools.map(t => ({
      name: t.name,
      description: t.description || '',
      // MCP JSON Schema vs. the SDK's FunctionDeclarationSchema typing
      // don't line up exactly; the shapes are compatible at runtime.
      parameters: t.inputSchema as any
    }));

    while (true) {
      const userInput = await new Promise<string>(resolve => rl.question('\nYou: ', resolve));
      if (userInput.toLowerCase() === 'exit') break;

      const chat = model.startChat({ tools: [{ functionDeclarations }] });
      const result = await chat.sendMessage(userInput);
      const response = result.response;

      const functionCalls = response.functionCalls();
      if (functionCalls && functionCalls.length > 0) {
        // NOTE(review): tool results are printed but not fed back to the
        // model for a follow-up turn — confirm this is intentional.
        for (const fc of functionCalls) {
          const toolResult = await client.callTool({ name: fc.name, arguments: (fc.args || {}) as Record<string, unknown> });
          console.log('\nResult:', JSON.stringify(toolResult.content, null, 2));
        }
      } else {
        console.log('\nAssistant:', response.text());
      }
    }
  } finally {
    // Release stdin and shut down the server connection even when the
    // loop exits via an exception, not just on a clean "exit".
    rl.close();
    await client.close();
  }
  process.exit(0);
}

main().catch(err => {
  console.error(err);
  process.exit(1); // non-zero status so callers/scripts can detect failure
});