Skip to main content
Glama
run-test.exp (8.96 kB)
#!/usr/bin/env expect
# run-test.exp -- driver for the gbox CLI integration test suite.
#
# Runs each test script in dependency order (or a single selected test),
# logs in with the GBOX_API_KEY profile first, and terminates all boxes
# when the run finishes or a test fails.
#
# Exit codes:
#   0 - All tests passed
#   1 - Test failure
#   3 - API server not accessible
#   4 - Invalid arguments

# Colors for output
set GREEN  "\033\[0;32m"
set RED    "\033\[0;31m"
set YELLOW "\033\[1;33m"
set BLUE   "\033\[0;34m"
set NC     "\033\[0m"

# Get script directory
set script_dir [file dirname [file normalize [info script]]]

# Get the gbox binary path.
# Default to using the built binary in the parent directory; if it does not
# exist, fall back to a `gbox` found on PATH.
# NOTE(review): "../gbox" is resolved against the current working directory,
# not $script_dir -- confirm callers always run this from the tests directory.
set gbox_binary "../gbox"
if {![file exists $gbox_binary]} {
    set gbox_binary "gbox"
}

# Track executed tests so a test file is never run twice in one session.
set executed_tests {}

# cleanup_boxes -- terminate every box created during the run.
#
# Spawns `gbox box terminate --all`, answers an interactive confirmation
# prompt if one appears, and reports the outcome. Failures are downgraded
# to warnings: cleanup must never abort the test run. Sleeps briefly
# afterwards so termination can settle before the process exits.
proc cleanup_boxes {} {
    global gbox_binary GREEN YELLOW BLUE NC

    puts "\n${BLUE}Clearing all existing boxes after testing...${NC}"
    if {[catch {
        spawn $gbox_binary box terminate --all
        expect {
            -re "Are you sure.*terminate.*boxes" {
                # Interactive confirmation prompt: answer yes, then wait for
                # the final status line.
                puts "${BLUE}Confirming termination of all boxes...${NC}"
                send "y\r"
                expect {
                    -re "All boxes terminated" {
                        puts "${GREEN}✓ All boxes terminated successfully${NC}"
                    }
                    -re "No boxes found" {
                        puts "${GREEN}✓ No boxes to terminate${NC}"
                    }
                    -re "failed to terminate" {
                        puts "${YELLOW}⚠ Warning: Some boxes may not have been terminated${NC}"
                    }
                    timeout {
                        puts "${YELLOW}⚠ Warning: Timeout waiting for box termination${NC}"
                    }
                    eof {
                        puts "${GREEN}✓ Box termination command completed${NC}"
                    }
                }
            }
            -re "All boxes terminated" {
                puts "${GREEN}✓ All boxes terminated successfully${NC}"
            }
            -re "No boxes found" {
                puts "${GREEN}✓ No boxes to terminate${NC}"
            }
            -re "failed to terminate" {
                puts "${YELLOW}⚠ Warning: Some boxes may not have been terminated${NC}"
            }
            timeout {
                puts "${YELLOW}⚠ Warning: Timeout waiting for box termination${NC}"
            }
            eof {
                puts "${GREEN}✓ Box termination command completed${NC}"
            }
        }
        catch {close}
    } result]} {
        puts "${YELLOW}⚠ Warning: Box termination failed: $result${NC}"
    }

    # Wait for a period of time to ensure that the terminate operation has
    # fully completed before the process exits.
    puts "${BLUE}Waiting for box termination to complete...${NC}"
    sleep 3
    puts "\n${GREEN}Test cleanup completed!${NC}"
}

# run_test -- execute one test script and record the result.
#
# Shared by the single-test path and the run-all loop (previously these two
# paths duplicated this logic line for line). Counts the "Testing ..."
# markers in the script for the log, execs the script, and on failure runs
# cleanup_boxes and exits with status 1. Callers must verify the file
# exists before calling.
proc run_test {test} {
    global script_dir executed_tests total_tests passed_tests
    global GREEN RED YELLOW BLUE NC

    # Skip duplicates instead of the original top-level `return`, which
    # would have terminated the whole script before summary and cleanup.
    if {[lsearch $executed_tests $test] >= 0} {
        puts "\n${BLUE}Skipping already executed test: $test${NC}"
        return
    }

    incr total_tests
    puts "\n${YELLOW}========================================${NC}"
    puts "${YELLOW}Running test: $test${NC}"
    puts "${YELLOW}========================================${NC}"

    # Count test cases in the script so the log shows how many to expect.
    set fp [open "$script_dir/$test" r]
    set content [read $fp]
    close $fp
    set cases [regexp -all "Testing \[^\n\]*\\.\\.\\." $content]
    puts "${BLUE}Found $cases test cases${NC}\n"

    if {[catch {
        set output [exec $script_dir/$test]
        puts "${BLUE}Test output:${NC}\n$output"
        puts "\n${GREEN}Test passed: $test${NC}"
    } result]} {
        # `exec` raises on a non-zero child exit; $result holds its output.
        puts "${BLUE}Test output:${NC}\n$result"
        puts "\n${RED}Test failed: $test${NC}"
        puts "${YELLOW}========================================${NC}"
        cleanup_boxes
        exit 1 ;# Exit code for test failure
    }
    puts "${YELLOW}========================================${NC}"
    incr passed_tests
    lappend executed_tests $test
}

# Test scripts to run in dependency order
# Dependency Graph:
#   box_create <- box_list
#   box_create <- box_start <- box_stop
#   box_create <- box_start <- box_cp
#   box_create <- box_start <- box_exec
#   box_create <- box_inspect
#   box_create <- box_delete
set test_scripts {
    version.exp     ;# No dependencies
    box-create.exp  ;# Base command, no dependencies
    box-list.exp    ;# Depends on box_create
    box-start.exp   ;# Depends on box_create
    box-stop.exp    ;# Depends on box_create, box_start
    box-cp.exp      ;# Depends on box_create, box_start
    box-exec.exp    ;# Depends on box_create, box_start
    box-inspect.exp ;# Depends on box_create
    box-delete.exp  ;# Depends on box_create
}

# Parse command line arguments: "--name" selects a single test to run.
set selected_test ""
foreach arg $argv {
    switch -glob -- $arg {
        "--*" {
            set selected_test [string range $arg 2 end]
        }
        default {
            puts "Unknown argument: $arg"
            puts "Usage: $argv0 [--test_name]"
            puts "  --test_name: Run specific test (e.g., --box-create)"
            exit 4
        }
    }
}

# Check for API key environment variable
if {![info exists env(GBOX_API_KEY)]} {
    puts "${RED}Error: GBOX_API_KEY environment variable is required for testing${NC}"
    puts "${YELLOW}Please set GBOX_API_KEY environment variable before running tests${NC}"
    exit 4
}

# Login with API key before running tests
puts "${BLUE}Logging in with API key...${NC}"
if {[catch {
    spawn $gbox_binary profile add --key $env(GBOX_API_KEY)
    expect {
        -re "Profile added successfully" {
            puts "${GREEN}✓ Successfully logged in with API key${NC}"
        }
        -re "Profile already exists" {
            puts "${GREEN}✓ Profile already exists, continuing with tests${NC}"
        }
        timeout {
            puts "${RED}✗ Timeout during login${NC}"
            exit 3
        }
        eof {
            puts "${RED}✗ Login command exited unexpectedly${NC}"
            exit 3
        }
    }
    catch {close}
} result]} {
    puts "${RED}✗ Login failed: $result${NC}"
    exit 3
}

# Run tests
set total_tests 0
set passed_tests 0

if {$selected_test != ""} {
    # Run specific test. Add .exp extension if not present.
    if {![string match "*.exp" $selected_test]} {
        set selected_test "$selected_test.exp"
    }
    if {[file exists $script_dir/$selected_test]} {
        run_test $selected_test
    } else {
        puts "\n${RED}Test script not found: $selected_test${NC}"
        exit 4
    }
} else {
    # Run all tests; silently skip list entries whose file does not exist.
    foreach test $test_scripts {
        if {[file exists $script_dir/$test]} {
            run_test $test
        }
    }
}

# Print summary
puts "\n${YELLOW}========================================${NC}"
puts "${YELLOW}Test Summary:${NC}"
puts "${YELLOW}========================================${NC}"
puts "Total test files: $total_tests"
puts "Passed files: ${GREEN}$passed_tests${NC}"
if {$passed_tests != $total_tests} {
    puts "Failed files: ${RED}[expr $total_tests - $passed_tests]${NC}"
} else {
    puts "Failed files: 0"
}
puts "${YELLOW}========================================${NC}"

if {$passed_tests == $total_tests} {
    puts "\n${GREEN}All tests passed successfully!${NC}"
} else {
    puts "\n${RED}Some tests failed.${NC}"
}

# Clean up all boxes after testing
cleanup_boxes

if {$passed_tests == $total_tests} {
    exit 0
} else {
    exit 1 ;# Exit code for test failure
}

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/babelcloud/gru-sandbox'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.