
McFlow

deploy.test.ts (13.7 kB)
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { WorkflowDeployer } from '../src/deploy';
import fs from 'fs/promises';
import path from 'path';
import { fileURLToPath } from 'url';
import { execSync } from 'child_process';

// Mock child_process
vi.mock('child_process', () => ({
  execSync: vi.fn()
}));

const __dirname = path.dirname(fileURLToPath(import.meta.url));
const realWorkflowsPath = path.join(__dirname, '..');

describe('WorkflowDeployer', () => {
  let deployer: WorkflowDeployer;
  let testWorkflowsPath: string;

  beforeEach(async () => {
    // Clear all mocks
    vi.clearAllMocks();

    // Create unique test directory for each test
    testWorkflowsPath = path.join(
      __dirname,
      `test-deploy-${Date.now()}-${Math.random().toString(36).substring(7)}`
    );

    // Create test directory structure
    await fs.mkdir(path.join(testWorkflowsPath, 'workflows', 'flows'), { recursive: true });
    await fs.mkdir(path.join(testWorkflowsPath, 'workflows', 'nodes', 'code'), { recursive: true });
    await fs.mkdir(path.join(testWorkflowsPath, 'workflows', 'nodes', 'prompts'), { recursive: true });
    await fs.mkdir(path.join(testWorkflowsPath, 'workflows', 'dist'), { recursive: true });

    deployer = new WorkflowDeployer({
      workflowsPath: testWorkflowsPath,
      n8nUrl: 'http://localhost:5678',
    });
  });

  afterEach(async () => {
    // Clean up test directory
    if (testWorkflowsPath) {
      await fs.rm(testWorkflowsPath, { recursive: true, force: true }).catch(() => {});
    }
  });

  describe('deployWorkflow', () => {
    it('should compile workflow before deployment', async () => {
      // Create a test workflow with external code reference
      const workflow = {
        name: 'test-deploy-workflow',
        description: 'Test workflow for deployment',
        nodes: [
          {
            name: 'Code Node',
            type: 'n8n-nodes-base.code',
            parameters: {
              nodeContent: {
                jsCode: 'deploy-test-code'
              }
            }
          }
        ],
        connections: {}
      };

      // Create the external code file
      const codeContent = 'console.log("Deployed code!");';
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'nodes', 'code', 'deploy-test-code.js'),
        codeContent
      );

      // Save the workflow
      const workflowPath = path.join(testWorkflowsPath, 'workflows', 'flows', 'test-deploy-workflow.json');
      await fs.writeFile(workflowPath, JSON.stringify(workflow, null, 2));

      // Mock execSync to simulate successful deployment
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      mockedExecSync.mockReturnValue('Successfully imported 1 workflow');

      // Deploy the workflow
      await deployer.deployWorkflow(workflowPath);

      // Verify execSync was called with the correct command
      expect(mockedExecSync).toHaveBeenCalled();
      const callArgs = mockedExecSync.mock.calls[0];
      expect(callArgs[0]).toContain('n8n import:workflow');
      expect(callArgs[0]).toContain('--input=');

      // Read the temp file that was created (we can't access it directly, but we can verify the structure)
      // The workflow should have been compiled (code injected)
      expect(mockedExecSync).toHaveBeenCalledTimes(1);
    });

    it('should preserve description field during deployment', async () => {
      const testDescription = 'Deployment test at ' + new Date().toISOString();

      // Create a test workflow with a description
      const workflow = {
        name: 'test-description-workflow',
        description: testDescription,
        nodes: [
          {
            name: 'Webhook',
            type: 'n8n-nodes-base.webhook',
            parameters: {
              path: '/test'
            }
          }
        ],
        connections: {}
      };

      // Save the workflow
      const workflowPath = path.join(testWorkflowsPath, 'workflows', 'flows', 'test-description-workflow.json');
      await fs.writeFile(workflowPath, JSON.stringify(workflow, null, 2));

      // Mock execSync and capture the deployed workflow
      let deployedWorkflow: any = null;
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      mockedExecSync.mockImplementation((command: string) => {
        // Extract the temp file path from the command
        const match = command.match(/--input=([^\s]+)/);
        if (match && match[1]) {
          try {
            // In real test, we'd read the file, but here we'll verify the command structure
            return 'Successfully imported 1 workflow';
          } catch (e) {
            // File might be cleaned up already
          }
        }
        return 'Successfully imported 1 workflow';
      });

      // Deploy the workflow
      await deployer.deployWorkflow(workflowPath);

      // Verify the deployment command was called
      expect(mockedExecSync).toHaveBeenCalled();
    });

    it('should handle workflows without external files', async () => {
      const workflow = {
        name: 'simple-workflow',
        description: 'Simple workflow without external files',
        nodes: [
          {
            name: 'Start',
            type: 'n8n-nodes-base.start',
            parameters: {}
          }
        ],
        connections: {}
      };

      // Save the workflow
      const workflowPath = path.join(testWorkflowsPath, 'workflows', 'flows', 'simple-workflow.json');
      await fs.writeFile(workflowPath, JSON.stringify(workflow, null, 2));

      // Mock execSync
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      mockedExecSync.mockReturnValue('Successfully imported 1 workflow');

      // Deploy should work without external files
      await deployer.deployWorkflow(workflowPath);
      expect(mockedExecSync).toHaveBeenCalledTimes(1);
    });

    it('should skip compilation when explicitly requested', async () => {
      const workflow = {
        name: 'skip-compile-workflow',
        nodes: [
          {
            name: 'Code Node',
            type: 'n8n-nodes-base.code',
            parameters: {
              nodeContent: {
                jsCode: 'should-not-be-compiled'
              }
            }
          }
        ],
        connections: {}
      };

      // Save the workflow
      const workflowPath = path.join(testWorkflowsPath, 'workflows', 'flows', 'skip-compile-workflow.json');
      await fs.writeFile(workflowPath, JSON.stringify(workflow, null, 2));

      // Mock execSync
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      mockedExecSync.mockReturnValue('Successfully imported 1 workflow');

      // Deploy with skipCompilation flag
      await deployer.deployWorkflow(workflowPath, true);

      // The workflow should be deployed as-is, with nodeContent still present
      expect(mockedExecSync).toHaveBeenCalledTimes(1);
    });

    it('should handle deployment failures gracefully', async () => {
      const workflow = {
        name: 'fail-workflow',
        nodes: [],
        connections: {}
      };

      // Save the workflow
      const workflowPath = path.join(testWorkflowsPath, 'workflows', 'flows', 'fail-workflow.json');
      await fs.writeFile(workflowPath, JSON.stringify(workflow, null, 2));

      // Mock execSync to throw an error
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      mockedExecSync.mockImplementation(() => {
        throw new Error('n8n CLI not found');
      });

      // Deployment should throw
      await expect(deployer.deployWorkflow(workflowPath)).rejects.toThrow('n8n CLI not found');
    });
  });

  describe('deployProject', () => {
    it('should deploy multiple workflows in correct order', async () => {
      // Create test workflows with different naming patterns
      const configWorkflow = {
        name: 'config-workflow',
        nodes: [],
        connections: {}
      };
      const mainWorkflow = {
        name: 'main-workflow',
        nodes: [],
        connections: {}
      };
      const regularWorkflow = {
        name: 'regular-workflow',
        nodes: [],
        connections: {}
      };

      // Save all workflows before deploying
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'flows', 'config-workflow.json'),
        JSON.stringify(configWorkflow, null, 2)
      );
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'flows', 'main-workflow.json'),
        JSON.stringify(mainWorkflow, null, 2)
      );
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'flows', 'regular-workflow.json'),
        JSON.stringify(regularWorkflow, null, 2)
      );

      // Mock execSync
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      const deploymentOrder: string[] = [];
      mockedExecSync.mockImplementation((command: string) => {
        // Extract workflow name from temp file to track deployment order
        // In a real scenario, we'd parse the temp file
        deploymentOrder.push(command);
        return 'Successfully imported 1 workflow';
      });

      // Deploy the project
      await deployer.deployProject(testWorkflowsPath);

      // Verify all workflows were deployed
      expect(mockedExecSync).toHaveBeenCalledTimes(3);

      // Config should be deployed first, main should be deployed last
      // (We can't easily verify the exact order from the mocked calls,
      // but we can verify that all were called)
      expect(deploymentOrder.length).toBe(3);
    });
  });

  describe('Real workflow deployment test', () => {
    it('should compile and prepare ai-assistant-example for deployment', async () => {
      const realDeployer = new WorkflowDeployer({ workflowsPath: realWorkflowsPath });
      const workflowPath = path.join(realWorkflowsPath, 'workflows', 'flows', 'ai-assistant-example.json');

      // Check if workflow exists
      try {
        await fs.access(workflowPath);
      } catch {
        console.log('Skipping real workflow deployment test - workflow not found');
        return;
      }

      // Read the original workflow
      const originalContent = await fs.readFile(workflowPath, 'utf-8');
      const originalWorkflow = JSON.parse(originalContent);

      // Mock execSync to prevent actual deployment but verify the workflow structure
      const mockedExecSync = execSync as unknown as ReturnType<typeof vi.fn>;
      let deployedWorkflowContent: any = null;
      mockedExecSync.mockImplementation((command: string, options: any) => {
        // Extract the temp file path and read the compiled workflow
        const match = command.match(/--input=([^\s]+)/);
        if (match && match[1]) {
          try {
            // Since this is a mock, we'll just verify the command structure
            // In a real test with access to the filesystem during execution,
            // we could read the temp file here
            deployedWorkflowContent = { verified: true, tempPath: match[1] };
          } catch (e) {
            // File might already be cleaned up
          }
        }
        return 'Successfully imported 1 workflow';
      });

      // Deploy the workflow (will compile it)
      await realDeployer.deployWorkflow(workflowPath);

      // Verify deployment was attempted
      expect(mockedExecSync).toHaveBeenCalled();

      // Verify the original description is preserved
      expect(originalWorkflow.description).toBeDefined();

      // The deployment command should have been called with a temp file
      const callArgs = mockedExecSync.mock.calls[0];
      expect(callArgs[0]).toContain('n8n import:workflow');
      expect(callArgs[0]).toMatch(/--input=\/tmp\/compiled_workflow_\d+\.json/);
    });
  });

  describe('compileAll', () => {
    it('should compile all workflows and save to dist', async () => {
      // Create multiple test workflows
      const workflow1 = {
        name: 'compile-test-1',
        description: 'First workflow to compile',
        nodes: [],
        connections: {}
      };
      const workflow2 = {
        name: 'compile-test-2',
        description: 'Second workflow to compile',
        nodes: [],
        connections: {}
      };

      // Save workflows
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'flows', 'compile-test-1.json'),
        JSON.stringify(workflow1, null, 2)
      );
      await fs.writeFile(
        path.join(testWorkflowsPath, 'workflows', 'flows', 'compile-test-2.json'),
        JSON.stringify(workflow2, null, 2)
      );

      // Compile all workflows
      await deployer.compileAll(true);

      // Check that files were saved to dist
      const distFiles = await fs.readdir(path.join(testWorkflowsPath, 'workflows', 'dist'));
      expect(distFiles).toContain('compile-test-1.json');
      expect(distFiles).toContain('compile-test-2.json');

      // Verify the compiled workflows have proper structure
      const compiled1 = JSON.parse(
        await fs.readFile(
          path.join(testWorkflowsPath, 'workflows', 'dist', 'compile-test-1.json'),
          'utf-8'
        )
      );
      expect(compiled1.name).toBe('compile-test-1');
      expect(compiled1.description).toBe('First workflow to compile');
      expect(compiled1.id).toBe('compile-test-1');
      expect(compiled1.updatedAt).toBeDefined();
      expect(compiled1.createdAt).toBeDefined();
      expect(compiled1.settings).toEqual({ executionOrder: 'v1' });
    });
  });
});
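For context, the tests above exercise a small public surface on WorkflowDeployer: a constructor taking workflowsPath (and optionally n8nUrl), plus deployWorkflow, deployProject, and compileAll. The sketch below is inferred only from how the tests call that API and is not taken from McFlow's own documentation; the project path is a placeholder, and the boolean second argument to deployWorkflow is assumed to skip compilation, as the "skip compilation" test suggests.

import { WorkflowDeployer } from './src/deploy'; // import path as used in the test file

// Point the deployer at a project root containing workflows/flows,
// workflows/nodes, and workflows/dist (the layout the tests create).
const deployer = new WorkflowDeployer({
  workflowsPath: '/path/to/project',   // placeholder
  n8nUrl: 'http://localhost:5678',     // value used in the tests
});

// Compile a single flow (inject external node code) and import it via the n8n CLI.
await deployer.deployWorkflow('/path/to/project/workflows/flows/my-workflow.json');

// Assumed: pass true to deploy the file as-is, skipping compilation.
await deployer.deployWorkflow('/path/to/project/workflows/flows/my-workflow.json', true);

// Deploy every flow in the project, or just write compiled copies to workflows/dist.
await deployer.deployProject('/path/to/project');
await deployer.compileAll(true);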

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/mckinleymedia/mcflow-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.