AgentExecutor.test.ts (14.3 kB)
import {
  AgentExecutor,
  DEFAULT_EXECUTION_TIMEOUT,
  createExecutionConfig,
} from 'src/execution/AgentExecutor'
import type { ExecutionParams } from 'src/types/ExecutionParams'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'

// Mock child_process module
vi.mock('node:child_process', () => ({
  spawn: vi.fn(),
}))

// Import the mocked module to get references
import { spawn as mockSpawn } from 'node:child_process'

describe('AgentExecutor', () => {
  let executor: AgentExecutor

  beforeEach(() => {
    vi.clearAllMocks()
    const testConfig = createExecutionConfig('cursor') // Use real agent type
    executor = new AgentExecutor(testConfig)

    // Setup spawn mock
    mockSpawn.mockImplementation((cmd: string, args: string[], options: any) => {
      // Extract the prompt which should be the last argument after -p flag
      const promptIndex = args.indexOf('-p')
      const prompt =
        promptIndex >= 0 && promptIndex < args.length - 1 ? args[promptIndex + 1] : ''

      // Check if the prompt contains agent information formatted as "agent: prompt text"
      const isTestAgent = prompt.includes('test-agent')
      const isBadAgent = prompt.includes('bad-agent') || prompt.includes('nonexistent-agent')
      const isSlowAgent = prompt.includes('slow-agent')

      const mockProcess = {
        stdin: {
          end: vi.fn(),
        },
        stdout: {
          on: vi.fn((event, callback) => {
            if (event === 'data') {
              if (isTestAgent) {
                // Success case - simulate single JSON response (--output-format json)
                setTimeout(() => {
                  // Send only the final result JSON
                  callback(
                    Buffer.from(
                      `${JSON.stringify({
                        type: 'result',
                        data: 'Test execution successful',
                      })}\n`
                    )
                  )
                }, 10)
              } else if (isBadAgent || isSlowAgent) {
                // Don't send successful data for bad agents or slow agents
              } else {
                // Default success - send result JSON (cursor format)
                setTimeout(() => {
                  callback(
                    Buffer.from(
                      `${JSON.stringify({
                        type: 'result',
                        data: 'Default execution',
                      })}\n`
                    )
                  )
                }, 10)
              }
            }
          }),
        },
        stderr: {
          on: vi.fn((event, callback) => {
            if (event === 'data') {
              if (isBadAgent) {
                setTimeout(() => {
                  callback(Buffer.from('Agent not found or execution failed'))
                }, 10)
              } else if (isSlowAgent) {
                // Don't send stderr for slow agent, let it timeout
              }
            }
          }),
        },
        on: vi.fn((event, callback) => {
          if (event === 'close') {
            if (isSlowAgent) {
              // For slow agent, don't call close callback to simulate timeout
              // The timeout handler in AgentExecutor will kill the process
            } else {
              // Simulate process close with appropriate exit code
              const exitCode = isBadAgent ? 1 : 0
              setTimeout(() => callback(exitCode), 50)
            }
          } else if (event === 'error' && isBadAgent) {
            // Trigger error for invalid scenarios
            setTimeout(() => {
              callback(new Error('Spawn execution failed'))
            }, 10)
          } else if (event === 'exit') {
            if (!isSlowAgent) {
              setTimeout(() => callback(), 50)
            }
          }
        }),
        kill: vi.fn(),
      }

      return mockProcess as any
    })
  })

  afterEach(() => {
    vi.restoreAllMocks()
  })

  describe('createExecutionConfig', () => {
    it('should create config with default timeout when no overrides provided', () => {
      const config = createExecutionConfig('cursor')
      expect(config.agentType).toBe('cursor')
      expect(config.executionTimeout).toBe(DEFAULT_EXECUTION_TIMEOUT)
    })

    it('should allow overriding execution timeout', () => {
      const customTimeout = 15000
      const config = createExecutionConfig('cursor', { executionTimeout: customTimeout })
      expect(config.executionTimeout).toBe(customTimeout)
    })

    it('should support all agent types', () => {
      const cursorConfig = createExecutionConfig('cursor')
      const claudeConfig = createExecutionConfig('claude')
      const geminiConfig = createExecutionConfig('gemini')
      expect(cursorConfig.agentType).toBe('cursor')
      expect(claudeConfig.agentType).toBe('claude')
      expect(geminiConfig.agentType).toBe('gemini')
    })
  })

  describe('command generation', () => {
    it('should use cursor-agent command for cursor type', async () => {
      const cursorConfig = createExecutionConfig('cursor')
      const cursorExecutor = new AgentExecutor(cursorConfig)
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Test prompt',
        cwd: '/tmp',
      }
      await cursorExecutor.executeAgent(params)
      expect(mockSpawn).toHaveBeenCalledWith('cursor-agent', expect.any(Array), expect.any(Object))
    })

    it('should use claude command for claude type', async () => {
      const claudeConfig = createExecutionConfig('claude')
      const claudeExecutor = new AgentExecutor(claudeConfig)
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Test prompt',
        cwd: '/tmp',
      }
      await claudeExecutor.executeAgent(params)
      expect(mockSpawn).toHaveBeenCalledWith('claude', expect.any(Array), expect.any(Object))
    })

    it('should use gemini command for gemini type', async () => {
      const geminiConfig = createExecutionConfig('gemini')
      const geminiExecutor = new AgentExecutor(geminiConfig)
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Test prompt',
        cwd: '/tmp',
      }
      await geminiExecutor.executeAgent(params)
      expect(mockSpawn).toHaveBeenCalledWith('gemini', expect.any(Array), expect.any(Object))
    })
  })

  describe('executeAgent', () => {
    it('should return successful result with parsed JSON on success', async () => {
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Help me',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      expect(result.exitCode).toBe(0)
      expect(result.hasResult).toBe(true)
      expect(result.resultJson).toEqual({
        type: 'result',
        data: 'Test execution successful',
      })
      expect(result.executionTime).toBeGreaterThan(0)
    })

    it('should return non-zero exit code and error message on failure', async () => {
      const params: ExecutionParams = {
        agent: 'nonexistent-agent',
        prompt: 'This should fail',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      expect(result.exitCode).not.toBe(0)
      expect(result.stderr).toContain('Agent not found')
      expect(result.hasResult).toBe(false)
    })

    it('should handle large prompts without truncation', async () => {
      const largePrompt = 'Generate detailed documentation'.repeat(200)
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: largePrompt,
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      expect(result.exitCode).toBe(0)
      expect(result.hasResult).toBe(true)
      // Verify prompt was passed to spawn (check mock was called with prompt containing full text)
      expect(mockSpawn).toHaveBeenCalledWith(
        expect.any(String),
        expect.arrayContaining(['-p', expect.stringContaining('Generate detailed documentation')]),
        expect.any(Object)
      )
    })

    it('should return error details when agent execution fails', async () => {
      const params: ExecutionParams = {
        agent: 'bad-agent',
        prompt: 'This should fail',
        cwd: '/invalid-directory',
      }
      const result = await executor.executeAgent(params)
      expect(result.exitCode).not.toBe(0)
      expect(result.stderr).toBeTruthy()
      expect(result.hasResult).toBe(false)
      expect(result.resultJson).toBeUndefined()
    })
  })

  describe('execution performance monitoring', () => {
    it('should measure execution time accurately', async () => {
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Quick task',
        cwd: '/tmp',
      }
      const startTime = Date.now()
      const result = await executor.executeAgent(params)
      const endTime = Date.now()
      expect(result.executionTime).toBeGreaterThanOrEqual(0)
      expect(result.executionTime).toBeLessThanOrEqual(endTime - startTime + 100) // Allow 100ms tolerance
    })
  })

  describe('error handling', () => {
    it('should handle invalid execution parameters', async () => {
      const invalidParams = {
        agent: '',
        prompt: '',
        cwd: '/tmp',
      } as ExecutionParams
      await expect(executor.executeAgent(invalidParams)).rejects.toThrow()
    })

    it('should handle timeout scenarios', async () => {
      const timeoutConfig = createExecutionConfig('cursor', {
        executionTimeout: 100,
      })
      const timeoutExecutor = new AgentExecutor(timeoutConfig)
      const params: ExecutionParams = {
        agent: 'slow-agent',
        prompt: 'This takes a long time',
        cwd: '/tmp',
      }
      const result = await timeoutExecutor.executeAgent(params)
      expect(result.exitCode).not.toBe(0)
      expect(result.stderr).toContain('timeout')
    })
  })

  describe('AgentExecutionResult extended fields', () => {
    it('should include hasResult field in execution result', async () => {
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Generate JSON response',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      // Test that hasResult field exists and is true when JSON is detected
      expect(result).toHaveProperty('hasResult')
      expect(result.hasResult).toBe(true)
    })

    it('should include resultJson field with parsed JSON when available', async () => {
      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Generate structured data',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      // Test that resultJson field exists with the parsed JSON
      expect(result).toHaveProperty('resultJson')
      expect(result.resultJson).toEqual({
        type: 'result',
        data: 'Test execution successful',
      })
    })

    it('should set hasResult to false when no JSON is detected', async () => {
      const params: ExecutionParams = {
        agent: 'bad-agent',
        prompt: 'This will fail',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      // Test that hasResult is false when execution fails
      // For failed executions, hasResult and resultJson are explicitly set
      expect(result.hasResult).toBe(false)
      expect(result.resultJson).toBeUndefined()
    })

    it('should handle SIGTERM (exit code 143) as normal when hasResult is true', async () => {
      // Mock a scenario where process is killed with SIGTERM after getting JSON
      const mockProcess = {
        stdin: { end: vi.fn() },
        stdout: {
          on: vi.fn((event, callback) => {
            if (event === 'data') {
              // Send result JSON
              setTimeout(() => {
                callback(Buffer.from('{"type": "result", "data": "Success"}\n'))
              }, 10)
            }
          }),
        },
        stderr: { on: vi.fn() },
        on: vi.fn((event, callback) => {
          if (event === 'close') {
            // Return exit code 143 (SIGTERM)
            setTimeout(() => callback(143), 50)
          }
        }),
        kill: vi.fn(),
      }
      mockSpawn.mockImplementationOnce(() => mockProcess as any)

      const params: ExecutionParams = {
        agent: 'test-agent',
        prompt: 'Stream JSON data',
        cwd: '/tmp',
      }
      const result = await executor.executeAgent(params)
      // Should recognize exit code 143 with hasResult=true as success
      expect(result.exitCode).toBe(143)
      expect(result.hasResult).toBe(true)
      expect(result.resultJson).toBeDefined()
    })

    it('should distinguish timeout with partial result from complete timeout', async () => {
      const timeoutConfig = createExecutionConfig('cursor', {
        executionTimeout: 100,
      })
      const timeoutExecutor = new AgentExecutor(timeoutConfig)

      // Mock process that sends JSON but times out
      const mockProcess = {
        stdin: { end: vi.fn() },
        stdout: {
          on: vi.fn((event, callback) => {
            if (event === 'data') {
              // Send partial result before timeout
              setTimeout(() => {
                callback(Buffer.from('{"type": "result", "partial": true}\n'))
              }, 50)
            }
          }),
        },
        stderr: { on: vi.fn() },
        on: vi.fn((event, callback) => {
          if (event === 'close') {
            // Simulate timeout exit code 124
            setTimeout(() => callback(124), 150)
          }
        }),
        kill: vi.fn(),
      }
      mockSpawn.mockImplementationOnce(() => mockProcess as any)

      const params: ExecutionParams = {
        agent: 'partial-agent',
        prompt: 'Partial completion',
        cwd: '/tmp',
      }
      const result = await timeoutExecutor.executeAgent(params)
      // Should have partial result even with timeout
      expect(result.exitCode).toBe(124)
      expect(result.hasResult).toBe(true)
      expect(result.resultJson).toEqual({ type: 'result', partial: true })
    })
  })
})

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/shinpr/sub-agents-mcp'
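
The same lookup from TypeScript, as a minimal sketch: it assumes Node 18+ (global fetch) and simply prints the JSON response, since its exact shape is not documented here.

// Query the same endpoint as the curl example above.
const response = await fetch('https://glama.ai/api/mcp/v1/servers/shinpr/sub-agents-mcp')
if (!response.ok) {
  throw new Error(`Request failed with status ${response.status}`)
}
// Log the raw server record returned by the API.
console.log(await response.json())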

If you have feedback or need assistance with the MCP directory API, please join our Discord server.