MCP Task

by just-every
safety-validation.test.ts (5.98 kB)
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { spawn } from 'child_process';
import * as path from 'path';

describe('Safety Validation Tests', () => {
  let serverProcess: any;

  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe('Multiple Models with Write Access Prevention', () => {
    it('should throw error when multiple models are used with read_only: false', async () => {
      // Mock the run_task request with multiple models and write access
      const mockRequest = {
        method: 'tools/call',
        params: {
          name: 'run_task',
          arguments: {
            task: 'Test task',
            model: ['gpt-5', 'claude-3.5'],
            read_only: false,
          },
        },
      };

      // We can't easily test the actual server without spawning it,
      // so we'll test the logic directly by importing the validation.
      // For now, we'll create a unit test that simulates the validation logic.
      const validateTaskRequest = (args: any) => {
        const modelParam = args.model;
        const isBatch = Array.isArray(modelParam);
        const models = isBatch ? modelParam : [modelParam || 'standard'];

        // SAFETY CHECK: Prevent multiple models with write access
        if (!args.read_only && isBatch && models.length > 1) {
          throw new Error(
            'Multiple models with write access (read_only: false) is not allowed to prevent file conflicts. ' +
              'Please either:\n' +
              '1. Set read_only: true when using multiple models, or\n' +
              '2. Use a single model when write access is needed.'
          );
        }

        return true;
      };

      // Test that error is thrown for multiple models with write access
      expect(() => validateTaskRequest(mockRequest.params.arguments)).toThrow(
        'Multiple models with write access'
      );
    });

    it('should allow multiple models with read_only: true', async () => {
      const mockRequest = {
        method: 'tools/call',
        params: {
          name: 'run_task',
          arguments: {
            task: 'Test task',
            model: ['gpt-5', 'claude-3.5'],
            read_only: true,
          },
        },
      };

      const validateTaskRequest = (args: any) => {
        const modelParam = args.model;
        const isBatch = Array.isArray(modelParam);
        const models = isBatch ? modelParam : [modelParam || 'standard'];

        if (!args.read_only && isBatch && models.length > 1) {
          throw new Error(
            'Multiple models with write access (read_only: false) is not allowed'
          );
        }

        return true;
      };

      // Test that no error is thrown for multiple models with read-only access
      expect(() => validateTaskRequest(mockRequest.params.arguments)).not.toThrow();
    });

    it('should allow single model with read_only: false', async () => {
      const mockRequest = {
        method: 'tools/call',
        params: {
          name: 'run_task',
          arguments: {
            task: 'Test task',
            model: 'gpt-5',
            read_only: false,
          },
        },
      };

      const validateTaskRequest = (args: any) => {
        const modelParam = args.model;
        const isBatch = Array.isArray(modelParam);
        const models = isBatch ? modelParam : [modelParam || 'standard'];

        if (!args.read_only && isBatch && models.length > 1) {
          throw new Error(
            'Multiple models with write access (read_only: false) is not allowed'
          );
        }

        return true;
      };

      // Test that no error is thrown for single model with write access
      expect(() => validateTaskRequest(mockRequest.params.arguments)).not.toThrow();
    });

    it('should handle undefined read_only (defaults to false) with multiple models', async () => {
      const mockRequest = {
        method: 'tools/call',
        params: {
          name: 'run_task',
          arguments: {
            task: 'Test task',
            model: ['gpt-5', 'claude-3.5'],
            // read_only is undefined, should default to false
          },
        },
      };

      const validateTaskRequest = (args: any) => {
        const modelParam = args.model;
        const isBatch = Array.isArray(modelParam);
        const models = isBatch ? modelParam : [modelParam || 'standard'];

        // When read_only is undefined, it defaults to false (write access)
        const readOnly = args.read_only || false;

        if (!readOnly && isBatch && models.length > 1) {
          throw new Error(
            'Multiple models with write access (read_only: false) is not allowed'
          );
        }

        return true;
      };

      // Test that error is thrown when read_only is undefined (defaults to false) with multiple models
      expect(() => validateTaskRequest(mockRequest.params.arguments)).toThrow(
        'Multiple models with write access'
      );
    });
  });
});
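The tests above re-declare the validation logic inline in each it() block because, as the comments note, the real check lives inside the server's request handler and the server cannot easily be exercised without spawning it. Purely as a sketch of one way to remove that duplication, the check could be extracted into a small shared module that both the server's run_task handler and the tests import. The module name (validation.ts), the TaskArguments interface, and the exported validateTaskRequest function below are illustrative assumptions, not part of the actual mcp-task codebase.

// validation.ts: hypothetical shared module (names are illustrative, not from mcp-task)

export interface TaskArguments {
  task: string;
  model?: string | string[];
  read_only?: boolean;
}

// Same safety rule the tests simulate: multiple models may only run read-only,
// to prevent concurrent write access from producing file conflicts.
export function validateTaskRequest(args: TaskArguments): true {
  const modelParam = args.model;
  const isBatch = Array.isArray(modelParam);
  const models = isBatch ? modelParam : [modelParam ?? 'standard'];
  const readOnly = args.read_only ?? false; // undefined defaults to write access

  if (!readOnly && isBatch && models.length > 1) {
    throw new Error(
      'Multiple models with write access (read_only: false) is not allowed to prevent file conflicts. ' +
        'Please either:\n' +
        '1. Set read_only: true when using multiple models, or\n' +
        '2. Use a single model when write access is needed.'
    );
  }

  return true;
}

With the check in one place, the tests could import validateTaskRequest instead of repeating it in every test case, and the server's handler could call the same function before dispatching any models, keeping the tested logic and the enforced logic identical.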
