diff --git a/.cloudbees/workflows/bp-tf-cd.yaml b/.cloudbees/workflows/bp-tf-cd.yaml new file mode 100644 index 00000000..e945926f --- /dev/null +++ b/.cloudbees/workflows/bp-tf-cd.yaml @@ -0,0 +1,256 @@ +# Copyright (c) CloudBees, Inc. + +# Stages +# CD: deploy,validate,onboarding +# Nuke: wipeout + +apiVersion: automation.cloudbees.io/v1alpha1 +kind: workflow +name: ci + +on: + workflow_dispatch: + +env: + AWS_REGION_TF_BUCKET: "us-east-1" + BUCKET_NAME_TF_STATE: "cbci-eks-addon-tf-state-v2" + AWS_ROLE_TO_ASSUME: "infra-admin-ci" + TF_VAR_suffix: "ci-v11" + TF_VAR_aws_region: "us-west-2" + TF_AUTO_VARS_FILE: | + tags = { + "cb-owner" : "professional-services" + "cb-user" : "cb-platform" + "cb-purpose" : "cd" + } + trial_license = { + first_name = "CloudBees.io" + last_name = "Platform" + email = "ci.user@cloudbees.io" + company = "CloudBees Inc." + } + ci = true + +jobs: + init: + steps: + + - name: Configure AWS Credentials + uses: cloudbees-io/configure-aws-credentials@v1 + with: + aws-region: ${{ env.AWS_REGION_TF_BUCKET }} + aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} + aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} + role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} + role-external-id: cloudbees + role-duration-seconds: "3600" + + #TODO: Add tags for the bucket + - name: Create Terraform Backend Bucket if not exists + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + shell: bash + run: | + set -x + aws s3api create-bucket \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} || echo "Bucket ${{ env.BUCKET_NAME_TF_STATE }} already exists" + + bp01: + env: + ROOT: 01-getting-started + TF_VAR_hosted_zone: bp01.aws.ps.beescloud.com + STAGES: "wipeout" + needs: + - init + steps: + + - name: Configure AWS Credentials + uses: cloudbees-io/configure-aws-credentials@v1 + with: + aws-region: ${{ env.TF_VAR_aws_region }} + aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} + aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} + role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} + role-external-id: cloudbees + role-duration-seconds: "3600" + + - name: Checkout code + uses: cloudbees-io/checkout@v1 + + - name: 01-getting-started - Set + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + shell: bash + run : | + cat <> blueprints/${{ env.ROOT }}/.auto.tfvars + ${{ env.TF_AUTO_VARS_FILE }} + EOT + cat blueprints/${{ env.ROOT }}/.auto.tfvars + cat <> blueprints/${{ env.ROOT }}/backend.tf + terraform { + backend "s3" { + bucket = "${{ env.BUCKET_NAME_TF_STATE }}" + key = "${{ env.ROOT }}/ci.terraform.tfstate" + region = "${{ env.AWS_REGION_TF_BUCKET }}" + } + } + EOT + + - name: 01-getting-started - Deploy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'deploy') + shell: bash + run : | + set -x + aws kms delete-alias --alias-name alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }}-eks --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }}-eks does not exist" + aws kms delete-alias --alias-name alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }} --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }} does not exist" + CI=true make deploy + aws s3api put-object \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} \ + --key ${{ env.ROOT }}/${{ 
env.ROOT }}.terraform.output \ + --body blueprints/${{ env.ROOT }}/terraform.output + + - name: 01-getting-started - Validate + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'validate') + shell: bash + run : | + CI=true make validate + + - name: 01-getting-started - Destroy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'destroy') + shell: bash + run : | + CI=true make destroy + + - name: 01-getting-started - Wipeout + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'wipeout') + shell: bash + run : | + terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy + + - name: 01-getting-started - Role Onboarding + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'onboarding') + env: + TARGET_ROLE: arn:aws:iam::324005994172:role/AWSReservedSSO_infra-admin_256addbf79cfacd1 + shell: bash + run : | + set -x + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw kubeconfig_export) + kubectl describe configmap aws-auth -n kube-system + eksctl create iamidentitymapping \ + --cluster $(terraform output --raw eks_cluster_name) \ + --region ${{ env.TF_VAR_aws_region }} \ + --arn ${{ env.TARGET_ROLE }} \ + --username k8s-admin-rol \ + --group system:masters + kubectl describe configmap aws-auth -n kube-system + + bp02: + env: + ROOT: 02-at-scale + TF_VAR_hosted_zone: bp02.aws.ps.beescloud.com + STAGES: "wipeout" + needs: + - init + steps: + + - name: Configure AWS Credentials + uses: cloudbees-io/configure-aws-credentials@v1 + with: + aws-region: ${{ env.TF_VAR_aws_region }} + aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} + aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} + role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} + role-external-id: cloudbees + role-duration-seconds: "3600" + + - name: Checkout code + uses: cloudbees-io/checkout@v1 + + - name: 02-at-scale - Set + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + shell: bash + run : | + cat <> blueprints/${{ env.ROOT }}/.auto.tfvars + ${{ env.TF_AUTO_VARS_FILE }} + dh_reg_secret_auth = { + username = "${{ secrets.AWS_TF_CBCI_EKS_DHUser }}" + password = "${{ secrets.AWS_TF_CBCI_EKS_DHPass }}" + email = "${{ secrets.AWS_TF_CBCI_EKS_DHMail }}" + } + EOT + cat blueprints/${{ env.ROOT }}/.auto.tfvars + cat <> blueprints/${{ env.ROOT }}/backend.tf + terraform { + backend "s3" { + bucket = "${{ env.BUCKET_NAME_TF_STATE }}" + key = "${{ env.ROOT }}/ci.terraform.tfstate" + region = "${{ env.AWS_REGION_TF_BUCKET }}" + } + } + EOT + + - name: 02-at-scale - Deploy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'deploy') + shell: bash + run : | + set -x + aws kms delete-alias --alias-name alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }}-eks --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }}-eks does not exist" + aws kms delete-alias --alias-name alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }} --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }} does not exist" + CI=true make deploy + aws s3api put-object \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} \ + --key ${{ env.ROOT 
}}/${{ env.ROOT }}.terraform.output \ + --body blueprints/${{ env.ROOT }}/terraform.output + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw kubeconfig_export) + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw vault_init) + aws s3api put-object \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} \ + --key ${{ env.ROOT }}/${{ env.ROOT }}.vault.init.log \ + --body $(cd blueprints/${{ env.ROOT }} && terraform output --raw vault_init_log_file) || echo "No vault-init.log found" + + - name: 02-at-scale - Validate + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'validate') + shell: bash + run : | + CI=true make validate + + - name: 02-at-scale - Destroy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'destroy') + shell: bash + run : | + CI=true make destroy + + - name: 02-at-scale - Wipeout + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'wipeout') + shell: bash + run : | + terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy + + - name: 02-at-scale - Role Onboarding + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'onboarding') + env: + TARGET_ROLE: arn:aws:iam::324005994172:role/AWSReservedSSO_infra-admin_256addbf79cfacd1 + shell: bash + run : | + set -x + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw kubeconfig_export) + kubectl describe configmap aws-auth -n kube-system + eksctl create iamidentitymapping \ + --cluster $(terraform output --raw eks_cluster_name) \ + --region ${{ env.TF_VAR_aws_region }} \ + --arn ${{ env.TARGET_ROLE }} \ + --username k8s-admin-rol \ + --group system:masters + kubectl describe configmap aws-auth -n kube-system diff --git a/.cloudbees/workflows/bp-tf-ci.yaml b/.cloudbees/workflows/bp-tf-ci.yaml index 9e95fa3f..8e57b247 100644 --- a/.cloudbees/workflows/bp-tf-ci.yaml +++ b/.cloudbees/workflows/bp-tf-ci.yaml @@ -1,20 +1,29 @@ # Copyright (c) CloudBees, Inc. +# Stages +# CI: deploy,validate,destroy +# Troubleshooting: deploy,validate,onboarding +# Nuke (Delete Dangling resource): wipeout + apiVersion: automation.cloudbees.io/v1alpha1 kind: workflow name: ci on: - # Enable once Issue #83 is solved + # Enable once Issue #83 is solved (Disable Manual trigger. workflow_dispatch) + # It requires + # pull_request: # branches: # - 'main' workflow_dispatch: env: - AWS_REGION: "us-east-1" + AWS_REGION_TF_BUCKET: "us-east-1" BUCKET_NAME_TF_STATE: "cbci-eks-addon-tf-state-v2" AWS_ROLE_TO_ASSUME: "infra-admin-ci" + TF_VAR_suffix: "ci-v11" + TF_VAR_aws_region: "us-west-2" TF_AUTO_VARS_FILE: | tags = { "cb-owner" : "professional-services" @@ -27,7 +36,6 @@ env: email = "ci.user@cloudbees.io" company = "CloudBees Inc." 
} - suffix = "ci-v11" ci = true jobs: @@ -37,7 +45,7 @@ jobs: - name: Configure AWS Credentials uses: cloudbees-io/configure-aws-credentials@v1 with: - aws-region: ${{ env.AWS_REGION }} + aws-region: ${{ env.AWS_REGION_TF_BUCKET }} aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} @@ -49,13 +57,16 @@ jobs: uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest shell: bash run: | + set -x aws s3api create-bucket \ --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ - --region ${{ env.AWS_REGION }} || echo "Bucket ${{ env.BUCKET_NAME_TF_STATE }} already exists" + --region ${{ env.AWS_REGION_TF_BUCKET }} || echo "Bucket ${{ env.BUCKET_NAME_TF_STATE }} already exists" bp01: env: - CLEAN_UP: "false" + ROOT: 01-getting-started + TF_VAR_hosted_zone: bp01.aws.ps.beescloud.com + STAGES: "deploy,validate,destroy" needs: - init steps: @@ -63,7 +74,7 @@ jobs: - name: Configure AWS Credentials uses: cloudbees-io/configure-aws-credentials@v1 with: - aws-region: ${{ env.AWS_REGION }} + aws-region: ${{ env.TF_VAR_aws_region }} aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} @@ -73,48 +84,83 @@ jobs: - name: Checkout code uses: cloudbees-io/checkout@v1 - - name: 01-getting-started - Deploy + - name: 01-getting-started - Set uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest - env: - ROOT: 01-getting-started - TF_VAR_hosted_zone: bp01.aws.ps.beescloud.com shell: bash run : | cat <> blueprints/${{ env.ROOT }}/.auto.tfvars ${{ env.TF_AUTO_VARS_FILE }} EOT + cat blueprints/${{ env.ROOT }}/.auto.tfvars cat <> blueprints/${{ env.ROOT }}/backend.tf terraform { backend "s3" { bucket = "${{ env.BUCKET_NAME_TF_STATE }}" key = "${{ env.ROOT }}/ci.terraform.tfstate" - region = "${{ env.AWS_REGION }}" + region = "${{ env.AWS_REGION_TF_BUCKET }}" } } EOT - [ "${{ env.CLEAN_UP }}" = "true" ] && echo "Skipping... Clean up mode." || CI=true make deploy + + - name: 01-getting-started - Deploy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'deploy') + shell: bash + run : | + set -x + aws kms delete-alias --alias-name alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }}-eks --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }}-eks does not exist" + aws kms delete-alias --alias-name alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }} --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp01-${{ env.TF_VAR_suffix }} does not exist" + CI=true make deploy + aws s3api put-object \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} \ + --key ${{ env.ROOT }}/${{ env.ROOT }}.terraform.output \ + --body blueprints/${{ env.ROOT }}/terraform.output - name: 01-getting-started - Validate uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest - env: - ROOT: 01-getting-started + if: contains(env.STAGES, 'validate') shell: bash run : | - [ "${{ env.CLEAN_UP }}" = "true" ] && echo "Skipping... Clean up mode." 
|| CI=true make validate + CI=true make validate - name: 01-getting-started - Destroy uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'destroy') + shell: bash + run : | + CI=true make destroy + + - name: 01-getting-started - Wipeout + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'wipeout') + shell: bash + run : | + terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy + + - name: 01-getting-started - Role Onboarding + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'onboarding') env: - ROOT: 01-getting-started - TF_VAR_hosted_zone: bp01.aws.ps.beescloud.com + TARGET_ROLE: arn:aws:iam::324005994172:role/AWSReservedSSO_infra-admin_256addbf79cfacd1 shell: bash run : | - [ "${{ env.CLEAN_UP }}" = "true" ] && terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy \ - || CI=true make destroy + set -x + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw kubeconfig_export) + kubectl describe configmap aws-auth -n kube-system + eksctl create iamidentitymapping \ + --cluster $(terraform output --raw eks_cluster_name) \ + --region ${{ env.TF_VAR_aws_region }} \ + --arn ${{ env.TARGET_ROLE }} \ + --username k8s-admin-rol \ + --group system:masters + kubectl describe configmap aws-auth -n kube-system bp02: env: - CLEAN_UP: "false" + ROOT: 02-at-scale + TF_VAR_hosted_zone: bp02.aws.ps.beescloud.com + STAGES: "deploy,validate,destroy" needs: - init steps: @@ -122,7 +168,7 @@ jobs: - name: Configure AWS Credentials uses: cloudbees-io/configure-aws-credentials@v1 with: - aws-region: ${{ env.AWS_REGION }} + aws-region: ${{ env.TF_VAR_aws_region }} aws-access-key-id: ${{ secrets.AWS_TF_CBCI_EKS_AccessKeyID }} aws-secret-access-key: ${{ secrets.AWS_TF_CBCI_EKS_SecretAccessKey }} role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} @@ -132,43 +178,79 @@ jobs: - name: Checkout code uses: cloudbees-io/checkout@v1 - - name: 02-at-scale - Deploy + - name: 02-at-scale - Set uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest - env: - ROOT: 02-at-scale - TF_VAR_hosted_zone: bp02.aws.ps.beescloud.com shell: bash run : | cat <> blueprints/${{ env.ROOT }}/.auto.tfvars ${{ env.TF_AUTO_VARS_FILE }} - gh_user = "exampleUser" - gh_token = "ExampleToken1234" + dh_reg_secret_auth = { + username = "${{ secrets.AWS_TF_CBCI_EKS_DHUser }}" + password = "${{ secrets.AWS_TF_CBCI_EKS_DHPass }}" + email = "${{ secrets.AWS_TF_CBCI_EKS_DHMail }}" + } EOT + cat blueprints/${{ env.ROOT }}/.auto.tfvars cat <> blueprints/${{ env.ROOT }}/backend.tf terraform { backend "s3" { bucket = "${{ env.BUCKET_NAME_TF_STATE }}" key = "${{ env.ROOT }}/ci.terraform.tfstate" - region = "${{ env.AWS_REGION }}" + region = "${{ env.AWS_REGION_TF_BUCKET }}" } } EOT - [ "${{ env.CLEAN_UP }}" = "true" ] && echo "Skipping... Clean up mode." 
|| CI=true make deploy + + - name: 02-at-scale - Deploy + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'deploy') + shell: bash + run : | + set -x + aws kms delete-alias --alias-name alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }}-eks --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }}-eks does not exist" + aws kms delete-alias --alias-name alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }} --region ${{ env.TF_VAR_aws_region }} || echo "Alias alias/eks/cbci-bp02-${{ env.TF_VAR_suffix }} does not exist" + CI=true make deploy + aws s3api put-object \ + --bucket ${{ env.BUCKET_NAME_TF_STATE }} \ + --region ${{ env.AWS_REGION_TF_BUCKET }} \ + --key ${{ env.ROOT }}/${{ env.ROOT }}.terraform.output \ + --body blueprints/${{ env.ROOT }}/terraform.output - name: 02-at-scale - Validate uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest - env: - ROOT: 02-at-scale + if: contains(env.STAGES, 'validate') shell: bash run : | - [ "${{ env.CLEAN_UP }}" = "true" ] && echo "Skipping... Clean up mode." || CI=true make validate + CI=true make validate - name: 02-at-scale - Destroy uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'destroy') + shell: bash + run : | + CI=true make destroy + + - name: 02-at-scale - Wipeout + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'wipeout') + shell: bash + run : | + terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy + + - name: 02-at-scale - Role Onboarding + uses: docker://public.ecr.aws/r1n1q0e5/cloudbees-labs/tf-aws-cb-ci-eks-addon-agent:latest + if: contains(env.STAGES, 'onboarding') env: - ROOT: 02-at-scale - TF_VAR_hosted_zone: bp02.aws.ps.beescloud.com + TARGET_ROLE: arn:aws:iam::324005994172:role/AWSReservedSSO_infra-admin_256addbf79cfacd1 shell: bash run : | - [ "${{ env.CLEAN_UP }}" = "true" ] && terraform -chdir=blueprints/${{ env.ROOT }} init -reconfigure && CI=true make destroy \ - || CI=true make destroy + set -x + cd blueprints/${{ env.ROOT }} && eval $(terraform output --raw kubeconfig_export) + kubectl describe configmap aws-auth -n kube-system + eksctl create iamidentitymapping \ + --cluster $(terraform output --raw eks_cluster_name) \ + --region ${{ env.TF_VAR_aws_region }} \ + --arn ${{ env.TARGET_ROLE }} \ + --username k8s-admin-rol \ + --group system:masters + kubectl describe configmap aws-auth -n kube-system diff --git a/.docker/agent/agent.root.Dockerfile b/.docker/agent/agent.root.Dockerfile index 3cf92e30..010cb24f 100644 --- a/.docker/agent/agent.root.Dockerfile +++ b/.docker/agent/agent.root.Dockerfile @@ -7,6 +7,7 @@ FROM alpine:3.19.0 ENV TF_VERSION=1.6.6 \ KUBECTL_VERSION=1.29.0 \ VELERO_VERSION=1.13.0 \ + EKSCTL_VERSION=0.188.0 \ ARCH=amd64 RUN apk add --update --no-cache \ @@ -33,3 +34,8 @@ RUN curl -sLO https://github.com/vmware-tanzu/velero/releases/download/v${VELERO mv velero-v${VELERO_VERSION}-linux-${ARCH}/velero /usr/bin/velero && \ chmod +x /usr/bin/velero && \ rm velero-v${VELERO_VERSION}-linux-${ARCH}.tar.gz + +RUN curl -sLO "https://github.com/weaveworks/eksctl/releases/download/v${EKSCTL_VERSION}/eksctl_Linux_${ARCH}.tar.gz" && \ + tar -xzf eksctl_Linux_${ARCH}.tar.gz -C /usr/bin && \ + chmod +x /usr/bin/eksctl && \ + rm eksctl_Linux_${ARCH}.tar.gz diff --git a/blueprints/01-getting-started/README.md 
b/blueprints/01-getting-started/README.md index b56ae0ff..0c11a238 100644 --- a/blueprints/01-getting-started/README.md +++ b/blueprints/01-getting-started/README.md @@ -51,6 +51,7 @@ This blueprint presents the minimum setup to run CloudBees CI on Amazon EKS; one | cbci_oc_pod | Operations center pod for the CloudBees CI add-on. | | cbci_oc_url | URL of the CloudBees CI operations center for the CloudBees CI add-on. | | eks_cluster_arn | Amazon EKS cluster ARN. | +| eks_cluster_name | Amazon EKS cluster name. | | kubeconfig_add | Add kubeconfig to your local configuration to access the Kubernetes API. | | kubeconfig_export | Export the KUBECONFIG environment variable to access the Kubernetes API. | | vpc_arn | VPC ID. | diff --git a/blueprints/01-getting-started/main.tf b/blueprints/01-getting-started/main.tf index 0c02bef7..5fc4a90b 100644 --- a/blueprints/01-getting-started/main.tf +++ b/blueprints/01-getting-started/main.tf @@ -31,8 +31,9 @@ locals { # CloudBees CI Add-on module "eks_blueprints_addon_cbci" { - source = "cloudbees/cloudbees-ci-eks-addon/aws" - version = ">= 3.18072.0" + #source = "cloudbees/cloudbees-ci-eks-addon/aws" + #version = ">= 3.18072.0" + source = "../../" depends_on = [module.eks_blueprints_addons] diff --git a/blueprints/01-getting-started/outputs.tf b/blueprints/01-getting-started/outputs.tf index a74f5b03..971a3474 100644 --- a/blueprints/01-getting-started/outputs.tf +++ b/blueprints/01-getting-started/outputs.tf @@ -1,4 +1,3 @@ - output "kubeconfig_export" { description = "Export the KUBECONFIG environment variable to access the Kubernetes API." value = "export KUBECONFIG=${local.kubeconfig_file_path}" @@ -64,3 +63,8 @@ output "eks_cluster_arn" { description = "Amazon EKS cluster ARN." value = module.eks.cluster_arn } + +output "eks_cluster_name" { + description = "Amazon EKS cluster name." + value = module.eks.cluster_name +} diff --git a/blueprints/02-at-scale/README.md b/blueprints/02-at-scale/README.md index 33ce8748..83f1952d 100644 --- a/blueprints/02-at-scale/README.md +++ b/blueprints/02-at-scale/README.md @@ -100,6 +100,7 @@ This blueprint divides scalable node groups for different types of workloads: | efs_access_points | Amazon EFS access points. | | efs_arn | Amazon EFS ARN. | | eks_cluster_arn | Amazon EKS cluster ARN. | +| eks_cluster_name | Amazon EKS cluster name. | | global_password | Random string that is used as the global password. | | grafana_dashboard | Provides access to Grafana dashboards. | | kubeconfig_add | Add kubeconfig to the local configuration to access the Kubernetes API. | @@ -109,8 +110,10 @@ This blueprint divides scalable node groups for different types of workloads: | s3_cbci_arn | CloudBees CI Amazon S3 bucket ARN. | | s3_cbci_name | CloudBees CI Amazon S3 bucket name. It is required by CloudBees CI for workspace caching and artifact management. | | s3_list_objects | Recursively lists all objects stored in the Amazon S3 bucket. | -| vault_configure | Provides access to Hashicorp Vault dashboard. It requires the root token from the vault_init output. | +| vault_configure | Configures Vault with initial secrets and creates an approle for integration with CloudBees CI (role-id and secret-id). It requires unseal keys and the root token from the vault_init output. | | vault_dashboard | Provides access to Hashicorp Vault dashboard. It requires the root token from the vault_init output. | +| vault_init | Initialization of the Vault service. | +| vault_init_log_file | Vault initialization log file. 
| | velero_backup_on_demand | Takes an on-demand Velero backup from the schedule for the selected controller that is using block storage. | | velero_backup_schedule | Creates a Velero backup schedule for the selected controller that is using block storage, and then deletes the existing schedule, if it exists. | | velero_restore | Restores the selected controller that is using block storage from a backup. | | vpc_arn | VPC ID. | @@ -206,25 +209,31 @@ DockerHub authentication is stored as Kubernetes secrets (`cbci-agent-sec-reg`) HashiCorp Vault is used as a credential provider for CloudBees CI Pipelines in this blueprint. -1. Run the configure Hashicorp Vault script. Store the admin token and unseal keys (saved in `k8s/vault-init.log`) and role ID and secret ID for the `cbci-oc` application role in a safe place. +1. Initialize Hashicorp Vault. Keep the admin token and unseal keys (saved in `k8s/vault-init.log`) in a safe place. + + ```sh + eval $(terraform output --raw vault_init) + ``` + +2. Run the Hashicorp Vault configuration script. It configures Vault with initial secrets and creates an `approle` for integration with CloudBees CI (role-id and secret-id). ```sh eval $(terraform output --raw vault_configure) ``` -2. Issue the following command to access the HashiCorp Vault UI. Enter the root token to sign in from _step 1_. +3. Access the HashiCorp Vault UI by issuing the following command. Enter the root token from _step 1_ to log in. ```sh eval $(terraform output --raw vault_dashboard) ``` -3. Sign in to the CloudBees CI operations center as a user with the admin role. +4. Sign in to the CloudBees CI operations center as a user with the admin role. -4. Navigate to **Manage Jenkins > Credentials Providers > HashiCorp Vault Credentials Provider** and complete the configuration for the CloudBees CI Vault Plugin by entering the role ID and secret ID for the `cbci-oc` application role from _step 1_. +5. Navigate to **Manage Jenkins > Credentials Providers > HashiCorp Vault Credentials Provider** and complete the configuration for the CloudBees CI Vault Plugin by entering the role ID and secret ID for the `cbci-oc` application role from _step 2_. -5. Select **Test Connection** to verify the inputs are correct. +6. Select **Test Connection** to verify the inputs are correct. -6. Move to `team-b` or `team-c-ha` to run the Pipeline (**admin > validations > vault-credentials**) and validate that credentials are fetched correctly from the Hashicorp Vault. +7. Move to `team-b` or `team-c-ha` to run the Pipeline (**admin > validations > vault-credentials**) and validate that credentials are fetched correctly from Hashicorp Vault. > [!NOTE] > Hashicorp Vault can be also be configured to be used for [Configuration as Code - Handling Secrets - Vault](https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/docs/features/secrets.adoc#hashicorp-vault-secret-source). 
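As a complement to the steps above, the role ID and secret ID entered in _step 5_ can also be read back from Vault if the output of the configuration script was not captured. This is a minimal sketch, not part of the blueprint: it reuses the defaults shown in this PR (namespace `vault`, pod `vault-0`, approle `cbci-oc`) and assumes the approle auth method is mounted at the default `approle/` path and that the session inside the pod is already logged in with the root token from _step 1_ (for example via `vault login`).

```sh
# Read the existing role ID for the cbci-oc approle (assumed default approle/ mount).
kubectl exec -it vault-0 -n vault -- vault read auth/approle/role/cbci-oc/role-id

# Generate a fresh secret ID for the same approle (each call issues a new one).
kubectl exec -it vault-0 -n vault -- vault write -f auth/approle/role/cbci-oc/secret-id
```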
diff --git a/blueprints/02-at-scale/cbci/casc/mc/parent/jcasc/main.yaml b/blueprints/02-at-scale/cbci/casc/mc/parent/jcasc/main.yaml index 417330c2..301da1c9 100644 --- a/blueprints/02-at-scale/cbci/casc/mc/parent/jcasc/main.yaml +++ b/blueprints/02-at-scale/cbci/casc/mc/parent/jcasc/main.yaml @@ -49,6 +49,8 @@ unclassified: git: remote: ${sharedLibRepo} aws: + awsCredentials: + region: "${sec_awsRegion}" s3: container: "${sec_s3bucketName}" disableSessionToken: false diff --git a/blueprints/02-at-scale/cbci/casc/mc/parent/variables/variables.yaml b/blueprints/02-at-scale/cbci/casc/mc/parent/variables/variables.yaml index 429fcc4d..51c51f17 100644 --- a/blueprints/02-at-scale/cbci/casc/mc/parent/variables/variables.yaml +++ b/blueprints/02-at-scale/cbci/casc/mc/parent/variables/variables.yaml @@ -3,5 +3,5 @@ variables: - s3bucketPreffix: "cbci" - regSecretsName: "cbci-sec-reg" - sharedLibRepo: "https://github.com/cloudbees/terraform-aws-cloudbees-ci-eks-addon.git" - - sharedLibBranch: develop + - sharedLibBranch: cb-platform - sharedLibPath: "blueprints/02-at-scale/cbci/shared-lib" diff --git a/blueprints/02-at-scale/cbci/casc/oc/items/root.yaml b/blueprints/02-at-scale/cbci/casc/oc/items/root.yaml index 18c4dce4..30723991 100644 --- a/blueprints/02-at-scale/cbci/casc/oc/items/root.yaml +++ b/blueprints/02-at-scale/cbci/casc/oc/items/root.yaml @@ -53,7 +53,7 @@ items: "cloudbees.prometheus": "true" properties: - configurationAsCode: - bundle: "develop/none-ha" + bundle: "cb-platform/none-ha" # Casc, HA - kind: managedController name: team-c-ha @@ -100,4 +100,4 @@ items: "cloudbees.prometheus": "true" properties: - configurationAsCode: - bundle: "develop/ha" + bundle: "cb-platform/ha" diff --git a/blueprints/02-at-scale/cbci/casc/oc/variables/variables.yaml b/blueprints/02-at-scale/cbci/casc/oc/variables/variables.yaml index 82f69637..bd3cf94a 100644 --- a/blueprints/02-at-scale/cbci/casc/oc/variables/variables.yaml +++ b/blueprints/02-at-scale/cbci/casc/oc/variables/variables.yaml @@ -1,7 +1,7 @@ variables: - message: "Welcome to the CloudBees CI blueprint add-on: At scale!" 
- cascRepo: "https://github.com/cloudbees/terraform-aws-cloudbees-ci-eks-addon.git" - - cascBranch: develop + - cascBranch: cb-platform - cascPathController: "/blueprints/02-at-scale/cbci/casc/mc/" - ldapManagerDN: "cn=admin,dc=acme,dc=org" - ldapRootDN: "dc=acme,dc=org" diff --git a/blueprints/02-at-scale/k8s/cbci-values.yml b/blueprints/02-at-scale/k8s/cbci-values.yml index e0062df8..354dc86f 100644 --- a/blueprints/02-at-scale/k8s/cbci-values.yml +++ b/blueprints/02-at-scale/k8s/cbci-values.yml @@ -16,7 +16,7 @@ OperationsCenter: Retriever: Enabled: true scmRepo: "https://github.com/cloudbees/terraform-aws-cloudbees-ci-eks-addon.git" - scmBranch: develop + scmBranch: cb-platform scmBundlePath: blueprints/02-at-scale/cbci/casc/oc scmPollingInterval: PT20M Persistence: diff --git a/blueprints/02-at-scale/k8s/vault-config.sh b/blueprints/02-at-scale/k8s/vault-config.sh index a09c0358..4f77929a 100644 --- a/blueprints/02-at-scale/k8s/vault-config.sh +++ b/blueprints/02-at-scale/k8s/vault-config.sh @@ -4,16 +4,12 @@ set -xeuo pipefail -HERE="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - # Vault namespace vault_ns="${1:-vault}" # App role name approle="cbci-oc" # https://github.com/hashicorp/terraform-aws-hashicorp-vault-eks-addon?tab=readme-ov-file#usage -## Init vault -kubectl exec -it vault-0 -n "$vault_ns" -- vault operator init | tee "$HERE/vault-init.log" || echo "Vault already initialized" ## Useal the vault for i in {1..3}; do read -r -p "INFO: Enter Unseal Key number $i [press Enter]: " key diff --git a/blueprints/02-at-scale/main.tf b/blueprints/02-at-scale/main.tf index dc541efb..3751f26e 100644 --- a/blueprints/02-at-scale/main.tf +++ b/blueprints/02-at-scale/main.tf @@ -89,6 +89,7 @@ locals { vault_ns = "vault" vault_config_file_path = abspath("k8s/vault-config.sh") + vault_init_file_path = abspath("k8s/vault-init.log") } resource "random_string" "global_pass_string" { diff --git a/blueprints/02-at-scale/outputs.tf b/blueprints/02-at-scale/outputs.tf index 1c7da431..d57b8239 100644 --- a/blueprints/02-at-scale/outputs.tf +++ b/blueprints/02-at-scale/outputs.tf @@ -1,4 +1,3 @@ - output "kubeconfig_export" { description = "Export the KUBECONFIG environment variable to access the Kubernetes API." value = "export KUBECONFIG=${local.kubeconfig_file_path}" @@ -115,6 +114,11 @@ output "eks_cluster_arn" { value = module.eks.cluster_arn } +output "eks_cluster_name" { + description = "Amazon EKS cluster name." + value = module.eks.cluster_name +} + output "s3_cbci_arn" { description = "CloudBees CI Amazon S3 bucket ARN." value = module.cbci_s3_bucket.s3_bucket_arn @@ -185,8 +189,18 @@ output "global_password" { value = "kubectl get secret ${module.eks_blueprints_addon_cbci.cbci_sec_casc} -n ${module.eks_blueprints_addon_cbci.cbci_namespace} -o jsonpath=${local.global_pass_jsonpath} | base64 -d" } +output "vault_init" { + description = "Initialization of the Vault service." + value = "kubectl exec -it vault-0 -n ${local.vault_ns} -- vault operator init | tee ${local.vault_init_file_path} || echo \"Vault initialization failed.\"" +} + +output "vault_init_log_file" { + description = "Vault initialization log file." + value = local.vault_init_file_path +} + output "vault_configure" { - description = "Provides access to Hashicorp Vault dashboard. It requires the root token from the vault_init output." + description = "Configures Vault with initial secrets and creates an approle for integration with CloudBees CI (role-id and secret-id). 
It requires unseal keys and the root token from the vault_init output." value = "bash ${local.vault_config_file_path} ${local.vault_ns}" }
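For context, a minimal usage sketch of how the new `vault_*` outputs chain together, assuming `terraform apply` has already completed for the `02-at-scale` blueprint and is run from the repository root:

```sh
# Move into the blueprint folder so the terraform outputs resolve against its state.
cd blueprints/02-at-scale

# Initialize Vault: runs `vault operator init` in the vault-0 pod and tees k8s/vault-init.log.
eval $(terraform output --raw vault_init)

# Configure Vault: runs vault-config.sh, which prompts for the unseal keys and root token
# recorded in the init log, seeds initial secrets, and creates the cbci-oc approle.
eval $(terraform output --raw vault_configure)

# Local path of the init log; the CI/CD workflows upload this file to the Terraform state bucket.
terraform output --raw vault_init_log_file
```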