e2e.yaml
name: E2E
on:
  workflow_dispatch:
    inputs:
      git_ref:
        type: string
      region:
        type: choice
        options:
          - "us-east-1"
          - "us-east-2"
          - "us-west-2"
          - "eu-west-1"
        default: "us-east-2"
      suite:
        type: choice
        required: true
        options:
          - Integration
          - NodeClaim
          - Consolidation
          - Interruption
          - Drift
          - Expiration
          - Chaos
          - IPv6
          - Scale
          - PrivateCluster
          - LocalZone
      k8s_version:
        type: choice
        options:
          - "1.28"
          - "1.29"
          - "1.30"
          - "1.31"
          - "1.32"
          - "1.33"
          - "1.34"
        default: "1.34"
      cluster_name:
        type: string
      cleanup:
        type: boolean
        required: true
        default: true
      enable_metrics:
        type: boolean
        default: false
      codebuild_region:
        type: string
      source:
        type: string
        default: "aws"
  workflow_call:
    inputs:
      git_ref:
        type: string
      source:
        type: string
        default: "aws"
      region:
        type: string
        default: "us-east-2"
      suite:
        type: string
        required: true
      k8s_version:
        type: string
        default: "1.34"
      enable_metrics:
        type: boolean
        default: false
      cleanup:
        type: boolean
        required: true
      workflow_trigger:
        type: string
      codebuild_region:
        type: string
      cluster_name:
        type: string
        description: If cluster_name is empty, a new cluster will be created. Otherwise, tests will run on an existing cluster
    secrets:
      SLACK_WEBHOOK_URL:
        required: false
      SLACK_WEBHOOK_SOAK_URL:
        required: false
jobs:
  run-suite:
    permissions:
      id-token: write # aws-actions/configure-aws-credentials@v4.0.1
      statuses: write # ./.github/actions/commit-status/start
    name: suite-${{ inputs.suite }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          ref: ${{ inputs.git_ref }}
      - if: always() && github.event_name == 'workflow_run'
        uses: ./.github/actions/commit-status/start
        with:
          name: ${{ github.workflow }} (${{ inputs.k8s_version }}) / e2e (${{ inputs.suite }})
          git_ref: ${{ inputs.git_ref }}
      - uses: ./.github/actions/install-deps
      - name: configure aws credentials
        uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0
        with:
          role-to-assume: arn:aws:iam::${{ vars.CI_ACCOUNT_ID }}:role/${{ vars.CI_ROLE_NAME }}
          aws-region: ${{ inputs.region }}
          role-duration-seconds: 21600
      - name: add jitter on cluster setup
        if: github.repository == 'aws/karpenter-provider-aws'
        run: |
          # Creating jitter so that we can stagger cluster creation to avoid throttling
          sleep $(( RANDOM % 300 + 1 ))
      - id: generate-cluster-name
        name: generate cluster name
        env:
          SUITE: ${{ inputs.suite }}
          CLUSTER_NAME: ${{ inputs.cluster_name }}
          WORKFLOW_TRIGGER: ${{ inputs.workflow_trigger }}
        run: |
          if [[ "$CLUSTER_NAME" == '' ]]; then
            if [[ "$WORKFLOW_TRIGGER" == 'soak' ]]; then
              CLUSTER_NAME=$(echo "soak-periodic-$RANDOM$RANDOM" | awk '{print tolower($0)}' | tr / -)
            else
              CLUSTER_NAME=$(echo "$SUITE-$RANDOM$RANDOM" | awk '{print tolower($0)}' | tr / -)
            fi
          fi
          echo "Using cluster name \"$CLUSTER_NAME\""
          echo CLUSTER_NAME="$CLUSTER_NAME" >> "$GITHUB_OUTPUT"
      - name: setup eks cluster '${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}'
        if: inputs.cluster_name == ''
        uses: ./.github/actions/e2e/setup-cluster
        with:
          account_id: ${{ vars.CI_ACCOUNT_ID }}
          role: ${{ vars.CI_ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
          k8s_version: ${{ inputs.k8s_version }}
          eksctl_version: v0.202.0
          ip_family: ${{ contains(inputs.suite, 'IPv6') && 'IPv6' || 'IPv4' }} # Set the value to IPv6 if IPv6 suite, else IPv4
          private_cluster: ${{ inputs.workflow_trigger == 'private_cluster' }}
          git_ref: ${{ inputs.git_ref }}
          ecr_account_id: ${{ vars.SNAPSHOT_ACCOUNT_ID }}
          ecr_region: ${{ vars.SNAPSHOT_REGION }}
          prometheus_workspace_id: ${{ vars.WORKSPACE_ID }}
          prometheus_region: ${{ vars.PROMETHEUS_REGION }}
          enable_local_zones: ${{ inputs.suite == 'LocalZone' }}
          cleanup: ${{ inputs.cleanup }}
          codebuild_role: ${{ vars[format('{0}_CODEBUILD_ROLE', inputs.codebuild_region)] }}
      - name: run tests for private cluster
        if: ${{ inputs.workflow_trigger == 'private_cluster' }}
        uses: ./.github/actions/e2e/run-tests-private-cluster
        with:
          cluster_name: ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
          suite: ${{ inputs.suite }}
          prometheus_region: ${{ vars.PROMETHEUS_REGION }}
          prometheus_workspace_id: ${{ vars.WORKSPACE_ID }}
          region: ${{ inputs.region }}
          account_id: ${{ vars.CI_ACCOUNT_ID }}
          k8s_version: ${{ inputs.k8s_version }}
          ecr_account_id: ${{ vars.SNAPSHOT_ACCOUNT_ID }}
          ecr_region: ${{ vars.SNAPSHOT_REGION }}
          private_cluster: ${{ inputs.workflow_trigger == 'private_cluster' }}
          enable_metrics: ${{ inputs.enable_metrics }}
          metrics_region: ${{ vars.TIMESTREAM_REGION }}
          node_role: ${{ env.NODE_ROLE }}
          cleanup: ${{ inputs.cleanup }}
          codebuild_sg: ${{ vars[format('{0}_CODEBUILD_SG', inputs.codebuild_region)] }}
          codebuild_vpc: ${{ vars[format('{0}_CODEBUILD_VPC', inputs.codebuild_region)] }}
      - name: run the ${{ inputs.suite }} test suite
        if: ${{ inputs.workflow_trigger != 'private_cluster' }}
        env:
          SUITE: ${{ inputs.suite }}
          ENABLE_METRICS: ${{ inputs.enable_metrics }}
          SOURCE: ${{ inputs.source }}
        run: |
          aws eks update-kubeconfig --name ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
          # Clean up the cluster before running all tests
          kubectl delete nodepool --all
          kubectl delete ec2nodeclass --all
          kubectl delete deployment --all
          if [[ "$SOURCE" == 'aws' ]]; then
            TEST_SUITE="$SUITE" ENABLE_METRICS=$ENABLE_METRICS METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" \
            CLUSTER_NAME="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }} --query "cluster.endpoint" --output text)" \
            INTERRUPTION_QUEUE="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" make e2etests
          elif [[ "$SOURCE" == 'upstream' ]]; then
            FOCUS="$SUITE" GIT_REF="$(git rev-parse HEAD)" \
            CLUSTER_NAME="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" make upstream-e2etests
          elif [[ "$SOURCE" == 'all' ]]; then
            TEST_SUITE="$SUITE" ENABLE_METRICS=$ENABLE_METRICS METRICS_REGION=${{ vars.TIMESTREAM_REGION }} GIT_REF="$(git rev-parse HEAD)" \
            CLUSTER_NAME="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }} --query "cluster.endpoint" --output text)" \
            INTERRUPTION_QUEUE="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" make e2etests
            FOCUS="$SUITE" GIT_REF="$(git rev-parse HEAD)" \
            CLUSTER_NAME="${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}" make upstream-e2etests
          fi
      - name: notify slack of success or failure
        uses: ./.github/actions/e2e/slack/notify
        if: (success() || failure()) && github.event_name != 'workflow_run' && inputs.workflow_trigger != 'versionCompatibility'
        with:
          cluster_name: ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
          url: ${{ inputs.workflow_trigger == 'soak' && secrets.SLACK_WEBHOOK_SOAK_URL || secrets.SLACK_WEBHOOK_URL }}
          suite: ${{ inputs.suite }}
          git_ref: ${{ inputs.git_ref }}
          workflow_trigger: ${{ inputs.workflow_trigger }}
      - name: dump logs on failure
        uses: ./.github/actions/e2e/dump-logs
        if: (failure() || cancelled()) && inputs.workflow_trigger != 'private_cluster'
        with:
          account_id: ${{ vars.CI_ACCOUNT_ID }}
          role: ${{ vars.CI_ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
      - name: cleanup karpenter and cluster '${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}' resources
        uses: ./.github/actions/e2e/cleanup
        if: always() && inputs.cleanup
        with:
          account_id: ${{ vars.CI_ACCOUNT_ID }}
          role: ${{ vars.CI_ROLE_NAME }}
          region: ${{ inputs.region }}
          cluster_name: ${{ steps.generate-cluster-name.outputs.CLUSTER_NAME }}
          git_ref: ${{ inputs.git_ref }}
          eksctl_version: v0.202.0
          private_cluster: ${{ inputs.workflow_trigger == 'private_cluster' }}
      - if: always() && github.event_name == 'workflow_run'
        uses: ./.github/actions/commit-status/end
        with:
          name: ${{ github.workflow }} (${{ inputs.k8s_version }}) / e2e (${{ inputs.suite }})
          git_ref: ${{ inputs.git_ref }}
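
For reference, a minimal sketch of dispatching this workflow manually with the GitHub CLI. The repository path and branch are assumptions; the input names and defaults come from the workflow_dispatch block above, and the workflow file is assumed to live at .github/workflows/e2e.yaml.

# Hypothetical manual dispatch of the E2E workflow (gh CLI must be authenticated).
# --repo and --ref are assumptions; -f flags map to the workflow_dispatch inputs.
gh workflow run e2e.yaml \
  --repo aws/karpenter-provider-aws \
  --ref main \
  -f suite=Integration \
  -f region=us-east-2 \
  -f k8s_version=1.34 \
  -f cleanup=true

Leaving cluster_name empty makes the generate-cluster-name step create a fresh cluster name; passing an existing cluster name skips cluster setup and runs the suite against that cluster.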
