upgrade
#!/bin/bash
# ---------------------------------------------------------------------------------------------------
# Identify all the machines specifically which are upgrade-able via this method
# SSM/Shells onto all the boxes to check if there is an upgrade available, then offer the user to upgrade
# All the endpoints to the latest version of stable if there is an update available.
# Every time an SSM command is executed against a host a record of it is pushed into a
# ./results/<timestamp>/<result>.json file + once all the commands are completed an aggregated file will be
# created in that directory too. If the json output from the SSM executions is not enough to debug
# just look in AWS and you'll see the whole execution history in SSM Command Execution History.
# ---------------------------------------------------------------------------------------------------

# Stop immediately if anything goes wrong, let's not create too much
# mess if John's shell is poor
set -eo pipefail

# Find & Import all the supporting functions from the supporting folder
# Get the directory of the current script to figure out where the
# Supporting funcs are
IMPORT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
for script in ${IMPORT_DIR}/supporting-funcs/*.sh; do
    if [[ -f "$script" ]]; then
        source "$script"
    fi
done

# Usage for this script
usage() {
    echo
    title "upgrade"
    section "----------------------------------"
    echo "This script will open an SSM session to all available"
    echo "nodes in the region and will check whether they have an"
    echo "upgrade available, if so the user can proceed and upgrade"
    echo "them all in parallel"
    section "----------------------------------"
    usage_text "Usage:" "upgrade [-p profile] [-r region] [-a automatic] [-s service]"
    echo "  $(option "-p profile") [pull-from-env/<profile-name>] AWS profile to use"
    echo "  $(option "-r region") AWS region to use"
    echo "  $(option "-s service") [sdf/rebaser/pinga/veritech/forklift/edda] SI Service to filter by, defaults to all"
    echo "  $(option "-e environment") [tools,production] Where the services are running"
    echo "  $(option "-a automatic") [Y/N] Run through automatically/no-interact"
    section "----------------------------------"
    example "e.g. ./awsi.sh upgrade -p pull-from-env -r us-east-1 -s sdf -e tools -a y"
    exit 0
}

# Add a check to see if the script is being sourced or executed
if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
    usage
fi

# Parse flags
while getopts ":p:r:a:s:e:" opt; do
    case ${opt} in
        p) profile=$OPTARG ;;
        r) region=$OPTARG ;;
        a) automatic=$OPTARG ;;
        s) service=$OPTARG ;;
        e) environment=$OPTARG ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            usage
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            usage
            ;;
    esac
done

# ---------------------------------------------------------------------------------------------------
# Main script
# ---------------------------------------------------------------------------------------------------
echo "$0 being invoked"

# Use the profile if in the invocation
if [[ "$profile" != "pull-from-env" ]]; then
    profile=$(get_param_or_env "$profile" "AWS_PROFILE" "Enter the AWS profile to use")
    export AWS_PROFILE="$profile"
fi

region=$(get_param_or_env "$region" "AWS_REGION" "Enter the AWS region (e.g., us-west-2)")
export AWS_REGION="$region"

# List instances with fixed-width columns and filter for the upgradeable instances
# Based on the filter provided
instances=$(list_instances ${service,,})
if [ -z "$instances" ]; then
    echo "No running instances found."
    exit 1
fi
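# Illustrative only: the awk parsing below assumes list_instances (a supporting func sourced above,
# not defined in this file) emits one whitespace-separated row per instance in the order
# Name, InstanceId, InstanceType, PrivateIpAddress, e.g. (values are made up):
#   si-veritech-example   i-0123456789abcdef0   m5.large   10.0.1.23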
echo "----------------------------------------"
echo "Running instances of services in the region $region:"
printf "%-5s %-20s %-20s %-20s %-20s\n" "Index" "Name" "InstanceId" "InstanceType" "PrivateIpAddress"

i=1
while read -r line; do
    name=$(echo "$line" | awk '{print $1}')
    instance_id=$(echo "$line" | awk '{print $2}')
    instance_type=$(echo "$line" | awk '{print $3}')
    private_ip=$(echo "$line" | awk '{print $4}')
    printf "%-5s %-20s %-20s %-20s %-20s\n" "$i" "$name" "$instance_id" "$instance_type" "$private_ip"
    ((i++))
done <<< "$instances"
echo "----------------------------------------"

[[ "${automatic,,}" == "y" ]] || read -p "Would you like to see if an SI binary upgrade is available to these hosts? (Y/N) [takes ~30 seconds] " selection
[[ "${automatic,,}" == "y" ]] || sassy_selection_check $selection

# Setup somewhere unique to push the results of the check into if they chose to continue
# Reset this results_directory variable between each execution run.
results_directory="./results/$(date +"%Y-%m-%d_%H-%M-%S")"
check_results_file=check_results.json
start_results_file=start_results.json
stop_results_file=stop_results.json
upgrade_results_file=upgrade_results.json
mkdir -p "$results_directory/"

i=1
while read -r line; do
    instance_id=$(echo "$line" | awk '{print $2}')
    service_name=$(echo "$line" | awk '{print $1}' | awk -F- '{print $2}')
    start_and_track_ssm_session "$instance_id" "$upgrade_check_script" "$results_directory" "InstanceId=$instance_id,Service=$service_name,Environment=$environment" &
    ((i++))
done <<< "$instances"

await_file_results "$results_directory" $((i - 1))
concat_and_output_json "$results_directory" "$check_results_file"
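# Illustrative only: the jq gates below assume the aggregated check_results.json is an array of
# per-instance records carrying at least the fields this script references (.instance_id, .service,
# .status, .upgradeable); the real records are produced by the supporting funcs. A made-up example entry:
#   { "instance_id": "i-0123456789abcdef0", "service": "veritech", "status": "success", "upgradeable": "true" }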
if jq -e 'all(.[]; .status == "success") and any(.[]; .upgradeable == "true")' "$results_directory/$check_results_file" > /dev/null; then
    [[ "${automatic,,}" == "y" ]] || read -p "Would you like to push the new binaries out to the upgradeable hosts? (Y/N) " selection
    [[ "${automatic,,}" == "y" ]] || sassy_selection_check $selection
elif jq -e 'all(.[]; .status == "success") and all(.[]; .upgradeable == "false")' "$results_directory/$check_results_file" > /dev/null; then
    echo "Info: There is no upgrade available from the stable track right now for any of the selected nodes, see above output"
    exit 0
else
    echo "Error: One or more of the checks failed to determine whether it was possible to upgrade the node."
    exit 2
fi

# For all order 1 services, upgrade them in *sequence*
# i.e.: For Veritech, this means that we will always have at least 50% execution capacity as one node will
# always be functioning
upgrade_candidates_json=$(cat $results_directory/$check_results_file)

# Reset the results_directory variable for the next set of results
results_directory="./results/$(date +"%Y-%m-%d_%H-%M-%S")"
mkdir -p $results_directory

upgrade_hosts_num=$(jq 'map(select(.service == "veritech")) | .[]' <<< $upgrade_candidates_json | jq -c '.' | wc -l)
jq 'map(select(.service == "veritech")) | .[]' <<< $upgrade_candidates_json | jq -c '.' | while read -r line; do
    instance_id=$(echo "$line" | jq -r '.instance_id')
    service_name=$(echo "$line" | jq -r '.service')
    start_and_track_ssm_session "$instance_id" "$service_state_script" "$results_directory" "InstanceId=$instance_id,Service=$service_name,Action=upgrade,Environment=$environment"
done

# Wait until all the results arrive
await_file_results "$results_directory" "$upgrade_hosts_num"

# Continue with the rest of the service nodes
upgrade_hosts_num=$(jq 'map(select(.service != "veritech")) | .[]' <<< $upgrade_candidates_json | jq -c '.' | wc -l)
jq 'map(select(.service != "veritech")) | .[]' <<< $upgrade_candidates_json | jq -c '.' | while read -r line; do
    instance_id=$(echo "$line" | jq -r '.instance_id')
    service_name=$(echo "$line" | jq -r '.service')
    start_and_track_ssm_session "$instance_id" "$service_state_script" "$results_directory" "InstanceId=$instance_id,Service=$service_name,Action=upgrade,Environment=$environment" &
    ((i++))
done

# Concatenate all the results together
upgrade_hosts_num=$(jq '.[]' <<< $upgrade_candidates_json | jq -c '.' | wc -l)
await_file_results "$results_directory" "$upgrade_hosts_num"
concat_and_output_json "$results_directory" "$upgrade_results_file"

echo "All active binary services of ${service} have been rotated"
echo "*******WEB IS NOT UPDATED VIA THIS TOOLING**************"
echo "If you are running in CI, this will be run automatically for you. If you"
echo "executed this manually/locally, you need to run the below pipeline, with"
echo "either tools or production, depending on which environment you're executing"
echo "this tool against, with service set to web and version as stable."
echo "https://github.com/systeminit/si/actions/workflows/deploy-stack.yml"
echo "----------------------------------------"
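
# Illustrative only (not part of this script's flow): the web deploy described above can also be
# triggered from the CLI with the GitHub CLI, assuming the deploy-stack.yml workflow accepts
# environment/service/version inputs as described in the echoes above; the exact input names here
# are an assumption, not confirmed by this script.
#   gh workflow run deploy-stack.yml --repo systeminit/si -f environment=tools -f service=web -f version=stable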
