#!/bin/bash
# setup-perform-e2e-test.sh
# Sets up the perform-e2e-test command workflow for Spec-Driven Development
# This script prepares the test execution environment and validates prerequisites
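#
# Usage (illustrative; one optional argument selects the test mode):
#   ./setup-perform-e2e-test.sh [smoke|regression|full]
# Unrecognized or missing modes fall back to "smoke".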
set -euo pipefail
# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
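
# common.sh is expected to provide get_repo_root, detect_ai_agent, and the
# print_* helpers used below; fail fast with a clear message if it is missing.
if [ ! -f "$SCRIPT_DIR/common.sh" ]; then
    echo "Error: common.sh not found in $SCRIPT_DIR" >&2
    exit 1
fi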
# Source common utilities
source "$SCRIPT_DIR/common.sh"
# Main setup function
setup_perform_e2e_test() {
    local repo_root
    repo_root=$(get_repo_root)

    # Navigate to repository root
    cd "$repo_root" || exit 1

    # Define paths
    local docs_dir="$repo_root/docs"
    local e2e_test_plan="$docs_dir/e2e-test-plan.md"
    local test_results_dir="$repo_root/test-results"
    local tests_e2e_dir="$repo_root/tests/e2e"
    local ground_rules_file="$repo_root/memory/ground-rules.md"
    local architecture_file="$docs_dir/architecture.md"

    # Generate timestamp for this test run
    local timestamp
    timestamp=$(date +"%Y%m%d_%H%M%S")

    # Check for required files
    if [ ! -f "$e2e_test_plan" ]; then
        print_error "E2E test plan not found at: $e2e_test_plan"
        print_info "Please run the design-e2e-test command first."
        exit 1
    fi

    if [ ! -f "$ground_rules_file" ]; then
        print_warning "Ground-rules file not found at: $ground_rules_file"
    fi

    if [ ! -f "$architecture_file" ]; then
        print_warning "Architecture file not found at: $architecture_file"
    fi
    # Create test results directory structure
    print_info "Setting up test results directory..."
    mkdir -p "$test_results_dir"/{screenshots,videos,logs,archive}

    # Create tests/e2e directory structure if it doesn't exist
    if [ ! -d "$tests_e2e_dir" ]; then
        print_info "Creating E2E test directory structure..."
        mkdir -p "$tests_e2e_dir"/{scenarios,pages,helpers,fixtures,config}

        # Create basic README (heredoc body stays flush-left so EOF terminates it)
        cat > "$tests_e2e_dir/README.md" <<EOF
# End-to-End Tests

This directory contains end-to-end test scripts generated from the E2E test plan.

## Structure

- \`scenarios/\` - Test scenario implementations
- \`pages/\` - Page Object Models
- \`helpers/\` - Test utility functions
- \`fixtures/\` - Test data fixtures
- \`config/\` - Test configuration files

## Running Tests

See \`docs/e2e-test-plan.md\` for test execution instructions.

## Generated by

Hanoi Rainbow \`perform-e2e-test\` command
EOF
        print_success "Created: $tests_e2e_dir"
    fi
    # Parse test execution mode from arguments (case-insensitive)
    local test_mode="smoke" # default
    if [ $# -gt 0 ]; then
        # Declare and assign separately so the substitution's exit status
        # is not masked by "local" under set -e
        local arg_lower
        arg_lower=$(echo "$1" | tr '[:upper:]' '[:lower:]')
        case "$arg_lower" in
            smoke|regression|full)
                test_mode="$arg_lower"
                ;;
            *)
                print_warning "Unknown test mode '$1' - falling back to smoke"
                test_mode="smoke"
                ;;
        esac
    fi
    # Count test scenarios in the plan. Note: "grep -c" prints 0 AND exits
    # non-zero when there are no matches, so "|| echo 0" would capture "0\n0"
    # and corrupt the JSON output; reset the variable on failure instead.
    local total_scenarios=0
    if [ -f "$e2e_test_plan" ]; then
        total_scenarios=$(grep -c "^#### Scenario" "$e2e_test_plan" 2>/dev/null) || total_scenarios=0
    fi
    # Detect test framework from plan
    local test_framework=""
    if [ -f "$e2e_test_plan" ]; then
        test_framework=$(detect_test_framework "$e2e_test_plan")
    fi

    # Detect AI agent
    local detected_agent
    detected_agent=$(detect_ai_agent "$repo_root")
    # Check if test framework is installed
    local framework_installed="false"
    if command -v npx &> /dev/null; then
        case "$test_framework" in
            "Playwright")
                if npx playwright --version &> /dev/null; then
                    framework_installed="true"
                fi
                ;;
            "Cypress")
                if npx cypress --version &> /dev/null; then
                    framework_installed="true"
                fi
                ;;
        esac
    fi
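    # Note: only Playwright and Cypress installs are probed above. Selenium and
    # Appium (also recognized by detect_test_framework) are not checked; a
    # minimal sketch, assuming the Python Selenium bindings, might look like:
    #   if python3 -c "import selenium" 2>/dev/null; then framework_installed="true"; fi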
    # Generate JSON output for AI agents
    generate_json_output "$e2e_test_plan" "$test_results_dir" "$timestamp" "$test_mode" "$total_scenarios" "$test_framework" "$framework_installed" "$detected_agent"

    # Print human-readable summary
    print_summary "$e2e_test_plan" "$test_results_dir" "$timestamp" "$test_mode" "$total_scenarios" "$test_framework" "$framework_installed"
}
# Detect test framework from E2E test plan
detect_test_framework() {
    local plan_file="$1"
    local framework="Unknown"

    if grep -qi "playwright" "$plan_file"; then
        framework="Playwright"
    elif grep -qi "cypress" "$plan_file"; then
        framework="Cypress"
    elif grep -qi "selenium" "$plan_file"; then
        framework="Selenium"
    elif grep -qi "appium" "$plan_file"; then
        framework="Appium"
    fi

    echo "$framework"
}
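
# Example (illustrative): a plan that mentions "Playwright" anywhere yields
#   detect_test_framework "docs/e2e-test-plan.md"   # prints "Playwright"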
# Generate JSON output for AI agent consumption
generate_json_output() {
    local e2e_test_plan="$1"
    local test_results_dir="$2"
    local timestamp="$3"
    local test_mode="$4"
    local total_scenarios="$5"
    local test_framework="$6"
    local framework_installed="$7"
    local ai_agent="$8"
    local result_file="$test_results_dir/e2e-test-result_${timestamp}.md"
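
    # Values are interpolated verbatim into JSON string fields; this assumes
    # paths and names contain no characters that would need JSON escaping
    # (double quotes, backslashes, newlines).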
    cat <<EOF
{
  "command": "perform-e2e-test",
  "status": "ready",
  "timestamp": "$timestamp",
  "test_mode": "$test_mode",
  "e2e_test_plan": "$e2e_test_plan",
  "test_results_dir": "$test_results_dir",
  "result_file": "$result_file",
  "total_scenarios": $total_scenarios,
  "detected_test_framework": "$test_framework",
  "framework_installed": $framework_installed,
  "detected_ai_agent": "$ai_agent",
  "execution_modes": {
    "smoke": "P0 critical scenarios only (~10-15 min)",
    "regression": "P0 + P1 scenarios (~1 hour)",
    "full": "All scenarios P0-P3 (~3-4 hours)"
  },
  "prerequisites": {
    "e2e_test_plan": "$e2e_test_plan",
    "test_framework": "$test_framework",
    "framework_installed": $framework_installed
  },
  "next_steps": [
    "Parse E2E test plan to extract test scenarios",
    "Generate test scripts from scenarios (if they do not exist)",
    "Set up test environment and test data",
    "Execute $test_mode test suite",
    "Capture test results, screenshots, and logs",
    "Generate detailed test result report: $result_file",
    "Analyze failures and provide recommendations",
    "Clean up test environment"
  ],
  "output_files": {
    "main_report": "$result_file",
    "screenshots_dir": "$test_results_dir/screenshots",
    "videos_dir": "$test_results_dir/videos",
    "logs_dir": "$test_results_dir/logs",
    "json_results": "$test_results_dir/results.json",
    "junit_xml": "$test_results_dir/junit.xml"
  },
  "test_suite_breakdown": {
    "smoke": {
      "priority": "P0",
      "estimated_duration": "10-15 minutes",
      "use_case": "Quick validation, every commit"
    },
    "regression": {
      "priority": "P0 + P1",
      "estimated_duration": "1 hour",
      "use_case": "Daily/nightly builds"
    },
    "full": {
      "priority": "P0 + P1 + P2 + P3",
      "estimated_duration": "3-4 hours",
      "use_case": "Weekly, pre-release"
    }
  }
}
EOF
}
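
# Example (illustrative): downstream tooling could isolate the JSON block and
# query it with jq, assuming jq is available and no other stdout line starts
# with "{" or "}":
#   ./setup-perform-e2e-test.sh smoke | sed -n '/^{/,/^}/p' | jq -r '.result_file'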
# Print human-readable summary
print_summary() {
    local e2e_test_plan="$1"
    local test_results_dir="$2"
    local timestamp="$3"
    local test_mode="$4"
    local total_scenarios="$5"
    local test_framework="$6"
    local framework_installed="$7"

    echo ""
    print_success "=== Perform E2E Test Setup Complete ==="
    echo ""
    print_info "Test Run Timestamp: $timestamp"
    print_info "Test Mode: $test_mode"
    print_info "E2E Test Plan: $e2e_test_plan"
    print_info "Test Results Directory: $test_results_dir"
    print_info "Total Scenarios in Plan: $total_scenarios"
    print_info "Detected Test Framework: $test_framework"

    if [ "$framework_installed" = "true" ]; then
        print_success "✓ Test Framework Installed"
    else
        print_warning "⚠ Test Framework Not Detected - may need installation"
    fi

    echo ""
    print_info "Test Suite Modes:"
    echo "  • smoke:      P0 critical scenarios only (~10-15 min)"
    echo "  • regression: P0 + P1 scenarios (~1 hour)"
    echo "  • full:       All scenarios P0-P3 (~3-4 hours)"
    echo ""
    print_info "Result File Will Be:"
    echo "  $test_results_dir/e2e-test-result_${timestamp}.md"
    echo ""
    print_info "Next Steps:"
    echo "  1. Parse E2E test plan to extract scenarios"
    echo "  2. Generate test scripts from scenarios (if needed)"
    echo "  3. Set up test environment and seed test data"
    echo "  4. Execute $test_mode test suite"
    echo "  5. Capture results, screenshots, and logs"
    echo "  6. Generate detailed test result report"
    echo "  7. Analyze failures and provide recommendations"
    echo "  8. Clean up test environment"
    echo ""

    if [ "$framework_installed" = "false" ]; then
        print_warning "NOTE: Test framework may need to be installed before execution"
        echo "  Install commands (if needed):"
        echo "    npm install -D @playwright/test   # Playwright"
        echo "    npm install -D cypress            # Cypress"
        echo ""
    fi

    print_info "To execute E2E tests with AI assistance, run:"
    echo "  perform-e2e-test $test_mode"
    echo ""
}
# Execute main setup with optional test mode argument
setup_perform_e2e_test "$@"