diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml new file mode 100644 index 0000000..6f02ecd --- /dev/null +++ b/.github/workflows/automerge.yml @@ -0,0 +1,12 @@ +--- +name: Auto merge +on: + pull_request: +jobs: + auto-merge: + uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@1.2.1 + secrets: + GITHUB: ${{ secrets.GITHUB }} + with: + tfcheck: 'tf-checks-complete-example / Check code format' +... \ No newline at end of file diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 1ee6f78..3e88b85 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -6,8 +6,8 @@ on: - "*" workflow_dispatch: jobs: - changelog: + call-workflow-changelog: uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@master secrets: inherit with: - branch: 'master' + branch: 'master' \ No newline at end of file diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml index 2a7c299..6ffb637 100644 --- a/.github/workflows/readme.yml +++ b/.github/workflows/readme.yml @@ -1,54 +1,50 @@ name: 'Create README.md file' +permissions: write-all + on: push: branches: - - master + - feat/addons + paths-ignore: + - '**/*README.md' + workflow_dispatch: jobs: - readme: - name: 'readme-create' - runs-on: ubuntu-latest - steps: - - name: 'Checkout' - uses: actions/checkout@master - - - name: 'Set up Python 3.7' - uses: actions/setup-python@v5 - with: - python-version: '3.x' - - name: 'create readme' - uses: 'clouddrove/github-actions@9.0.3' - with: - actions_subcommand: 'readme' - github_token: '${{ secrets.GITHUB }}' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - - name: 'pre-commit check errors' - uses: pre-commit/action@v3.0.0 - continue-on-error: true + readme-create: + uses: clouddrove/github-shared-workflows/.github/workflows/readme.yml@master + secrets: + TOKEN: ${{ secrets.GITHUB }} + SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM 
}} - - name: 'pre-commit fix erros' - uses: pre-commit/action@v3.0.0 - continue-on-error: true + # Create README for each Submodules + readme-create-addon: + name: 'Addon readme-create' + runs-on: ubuntu-latest + steps: + - name: Updating GitHub Token + env: + GITHUB_TOKEN: ${{ secrets.GITHUB }} + run: echo "GH_TOKEN=${GITHUB_TOKEN}" >> $GITHUB_ENV - - name: 'push readme' - uses: 'clouddrove/github-actions@9.0.3' - continue-on-error: true + - name: checkout + uses: actions/checkout@master with: - actions_subcommand: 'push' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + fetch-depth: 0 + ref: ${{ github.head_ref }} + token: ${{ env.GH_TOKEN }} - - name: 'Slack Notification' - uses: clouddrove/action-slack@v2 + - name: Generate TF Docs + uses: terraform-docs/gh-actions@v1.0.0 with: - status: ${{ job.status }} - fields: repo,author - author_name: 'CloudDrove' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} # required - if: always() + working-dir: addons/cluster-autoscaler,addons/cert-manager,addons/ingress-nginx,addons/keda,addons/reloader + git-push: true + template: |- + + {{ .Content }} + + git-push-user-name: "clouddrove-ci" + git-push-user-email: 84795582+clouddrove-ci@users.noreply.github.com + git-commit-message: "readme: Update add-on's readme" + + \ No newline at end of file diff --git a/.github/workflows/tf-checks.yml b/.github/workflows/tf-checks.yml new file mode 100644 index 0000000..940051d --- /dev/null +++ b/.github/workflows/tf-checks.yml @@ -0,0 +1,15 @@ +name: tf-checks +on: + push: + branches: [ master ] + pull_request: + workflow_dispatch: +jobs: + tf-checks-complete-example: + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master + with: + working_directory: './_examples/complete/' + tf-checks-basic-example: + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master + with: + working_directory: './_examples/basic/' \ No 
newline at end of file diff --git a/.github/workflows/tflint.yml b/.github/workflows/tflint.yml new file mode 100644 index 0000000..ee98182 --- /dev/null +++ b/.github/workflows/tflint.yml @@ -0,0 +1,11 @@ +name: tf-lint +on: + push: + branches: [ master ] + pull_request: + workflow_dispatch: +jobs: + tf-lint: + uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@master + secrets: + GITHUB: ${{ secrets.GITHUB }} \ No newline at end of file diff --git a/.github/workflows/tfsec.yml b/.github/workflows/tfsec.yml new file mode 100644 index 0000000..9aaf588 --- /dev/null +++ b/.github/workflows/tfsec.yml @@ -0,0 +1,11 @@ +name: tfsec +permissions: write-all +on: + pull_request: + workflow_dispatch: +jobs: + tfsec: + uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@master + secrets: inherit + with: + working_directory: '.' \ No newline at end of file diff --git a/README.yaml b/README.yaml index 43fe5c4..6b5a709 100644 --- a/README.yaml +++ b/README.yaml @@ -2,25 +2,23 @@ # # This is the canonical configuration for the `README.md` # Run `make readme` to rebuild the `README.md` -# # Name of this project -name : Terraform Module Template - +name: Terraform GOOGLE GKE ADDONS # License of this project license: "APACHE" # Canonical GitHub repo -github_repo: clouddrove/terraform-module-template +github_repo: clouddrove/terraform-google-gke-addons # Badges to display badges: - name: "Latest Release" - image: "https://img.shields.io/github/release/clouddrove/terraform-module-template.svg" - url: "https://github.com/clouddrove/terraform-module-template/releases/latest" + image: "https://img.shields.io/github/release/clouddrove/terraform-google-gke-addons.svg" + url: "https://github.com/clouddrove/terraform-google-gke-addons/releases/latest" - name: "tfsec" - image: "https://github.com/clouddrove/terraform-module-template/actions/workflows/tfsec.yml/badge.svg" - url: "" + image: 
"https://github.com/clouddrove/terraform-google-gke-addons/actions/workflows/tfsec.yml/badge.svg" + url: "https://github.com/clouddrove/terraform-google-gke-addons/actions/workflows/tfsec.yml" - name: "Licence" image: "https://img.shields.io/badge/License-APACHE-blue.svg" url: "LICENSE.md" @@ -31,14 +29,7 @@ prerequesties: # description of this project description: |- - Terraform module template to create new modules using this as baseline + A Terraform Addons module to customize & install widely used helmchart during or after creation of your AWS EKS cluster. # extra content include: - - "terraform.md" - -# How to use this project -# How to use this project -usage: |- - Here are some examples of how you can use this module in your inventory structure: - ```hcl - ``` + - "terraform.md" \ No newline at end of file diff --git a/_examples/basic/locals.tf b/_examples/basic/locals.tf new file mode 100644 index 0000000..83114d5 --- /dev/null +++ b/_examples/basic/locals.tf @@ -0,0 +1,14 @@ +locals { + name = "helm-addons-test" + environment = "test" + region = "us-central1" + cluster_version = "1.28.3-gke.1203001" + gcp_project_id = "dev-env-3b53" + cluster_name = "test-cluster" + tags = { + Name = local.name + Environment = local.environment + GithubRepo = "terraform-helm-gke-addons" + GithubOrg = "clouddrove" + } +} \ No newline at end of file diff --git a/_examples/basic/main.tf b/_examples/basic/main.tf index a07c2e6..75e5ecb 100644 --- a/_examples/basic/main.tf +++ b/_examples/basic/main.tf @@ -1,4 +1,200 @@ -# ------------------------------------------------------------------------------ -# Resources -# ------------------------------------------------------------------------------ -locals {} + +provider "google" { + project = local.gcp_project_id +} + +############################################################################### +# GCP NETWORKING RESOURCES +############################################################################### + + +module "vpc" { + 
source = "terraform-google-modules/network/google" + version = "~> 8.1" + + project_id = local.gcp_project_id + network_name = "${local.name}-vpc" + routing_mode = "GLOBAL" + + subnets = [ + { + subnet_name = "${local.name}-subnet-public-1" + subnet_ip = "10.10.10.0/24" + subnet_region = "us-central1" + }, + { + subnet_name = "${local.name}-subnet-private-1" + subnet_ip = "10.10.20.0/24" + subnet_region = "us-central1" + subnet_private_access = "true" + subnet_flow_logs = "true" + description = "This subnet has a description" + }, + { + subnet_name = "${local.name}-subnet-private-2" + subnet_ip = "10.10.30.0/24" + subnet_region = "us-central1" + subnet_private_access = "true" + subnet_flow_logs = "true" + description = "This subnet has used for GKE" + } + ] + + secondary_ranges = { + subnet-public-1 = [ + { + range_name = "${local.name}-subnet-private-1-secondary-01" + ip_cidr_range = "192.168.64.0/24" + }, + ], + subnet-public-2 = [ + { + range_name = "${local.name}-subnet-private-2-secondary-01" + ip_cidr_range = "192.168.128.0/24" + }, + ] + } + + routes = [ + { + name = "egress-internet" + description = "route through IGW to access internet" + destination_range = "0.0.0.0/0" + tags = "egress-inet" + next_hop_internet = "true" + }, + ] +} + +############################################################################### +# GCP GKE +############################################################################### + +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster" + version = "29.0.0" + project_id = local.gcp_project_id + name = local.cluster_name + region = local.region + kubernetes_version = local.cluster_version + zones = [] + network = module.vpc.network_name + subnetwork = "${local.name}-subnet-private-2" + ip_range_pods = "" + ip_range_services = "" + horizontal_pod_autoscaling = true + http_load_balancing = true + filestore_csi_driver = true + istio = true + create_service_account = true + 
remove_default_node_pool = true + disable_legacy_metadata_endpoints = false + deletion_protection = false + + node_pools = [ + + { + name = "general" + machine_type = "g1-small" + node_locations = "${local.region}-a" + min_count = 1 + max_count = 5 + local_ssd_count = 0 + spot = false + disk_size_gb = 10 + disk_type = "pd-standard" + image_type = "ubuntu_containerd" + enable_gcfs = false + enable_gvnic = false + logging_variant = "DEFAULT" + auto_repair = true + auto_upgrade = true + create_service_account = true + preemptible = false + initial_node_count = 1 + enable_node_pool_autoscaling = true + }, + { + name = "critical" + machine_type = "g1-small" + node_locations = "${local.region}-b" + min_count = 1 + max_count = 3 + local_ssd_count = 0 + spot = false + disk_size_gb = 10 + disk_type = "pd-standard" + image_type = "ubuntu_containerd" + enable_gcfs = false + enable_gvnic = false + logging_variant = "DEFAULT" + auto_repair = true + auto_upgrade = true + create_service_account = true + preemptible = false + initial_node_count = 1 + enable_node_pool_autoscaling = false + }, + ] + + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [ + local.tags.Name, + local.tags.Environment, + local.tags.GithubRepo, + local.tags.GithubOrg, + ] + default-node-pool = [ + "default-node-pool", + ] + } +} + + +############################################################################### +# GCP ADDONS +############################################################################### + +module "addons" { + source = "../../" + + depends_on = [module.gke] + gke_cluster_name = module.gke.name + project_id = local.gcp_project_id + region 
= local.region + + cluster_autoscaler = true + reloader = true + ingress_nginx = true + certification_manager = true + keda = true +} \ No newline at end of file diff --git a/_examples/basic/provider.tf b/_examples/basic/provider.tf new file mode 100644 index 0000000..2d52438 --- /dev/null +++ b/_examples/basic/provider.tf @@ -0,0 +1,25 @@ +# Retrieve an access token as the Terraform runner +data "google_client_config" "provider" {} + +data "google_container_cluster" "my_cluster" { + name = module.gke.name + location = "us-central1" +} + +provider "kubernetes" { + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) +} + +provider "helm" { + kubernetes { + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate + ) + } +} \ No newline at end of file diff --git a/_examples/basic/version.tf b/_examples/basic/version.tf new file mode 100644 index 0000000..09897b0 --- /dev/null +++ b/_examples/basic/version.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.8" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.6" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.7.0" + } + local = { + source = "hashicorp/local" + version = ">= 2.0.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0" + } + } +} \ No newline at end of file diff --git a/_examples/complete/config/keda/override-keda.yaml b/_examples/complete/config/keda/override-keda.yaml new file mode 100644 index 
0000000..34e6e90 --- /dev/null +++ b/_examples/complete/config/keda/override-keda.yaml @@ -0,0 +1,9 @@ +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: In + values: + - "critical" \ No newline at end of file diff --git a/_examples/complete/config/keda/strategy_example.yaml b/_examples/complete/config/keda/strategy_example.yaml new file mode 100644 index 0000000..7e1a12b --- /dev/null +++ b/_examples/complete/config/keda/strategy_example.yaml @@ -0,0 +1,36 @@ +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: nginx-deployment + namespace: nginx +spec: + scaleTargetRef: + apiVersion: apps/v1 # Optional. Default: apps/v1 + kind: Deployment # Optional. Default: Deployment + name: nginx # Mandatory. Must be in the same namespace as the ScaledObject + pollingInterval: 5 # Optional. Default: 5 seconds + cooldownPeriod: 20 # Optional. Default: 300 seconds + minReplicaCount: 1 # Optional. Default: 0 + maxReplicaCount: 10 # Optional. Default: 100 + fallback: # Optional. Section to specify fallback options + failureThreshold: 3 # Mandatory if fallback section is included + replicas: 1 # Mandatory if fallback section is included + advanced: # Optional. Section to specify advanced options + restoreToOriginalReplicaCount: true # Optional. Default: false + horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + name: keda-hpa-nginx # Optional. Default: keda-hpa-{scaled-object-name} + behavior: # Optional. Use to modify HPA's scaling behavior + scaleDown: + stabilizationWindowSeconds: 60 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: cron + metadata: + # Required + timezone: Asia/Kolkata # The acceptable values would be a value from the IANA Time Zone Database. 
+ start: "12 00 * * *" + end: "15 00 * * *" + desiredReplicas: "5" \ No newline at end of file diff --git a/_examples/complete/config/override-certification-manager.yaml b/_examples/complete/config/override-certification-manager.yaml new file mode 100644 index 0000000..15bac76 --- /dev/null +++ b/_examples/complete/config/override-certification-manager.yaml @@ -0,0 +1,19 @@ +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: In + values: + - "critical" + +resources: + limits: + cpu: 200m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi + +installCRDs: true \ No newline at end of file diff --git a/_examples/complete/config/override-cluster-autoscaler.yaml b/_examples/complete/config/override-cluster-autoscaler.yaml new file mode 100644 index 0000000..fac294f --- /dev/null +++ b/_examples/complete/config/override-cluster-autoscaler.yaml @@ -0,0 +1,29 @@ +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: In + values: + - "critical" + +## Using limits and requests +resourc_helm_configes: + limits: + cpu: 300m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi + +podAnnotations: + co.elastic.logs/enabled: "true" + +additionalLabels: {} +affinity: {} + +cloudProvider: gce + +extraArgs: + leader-elect: false \ No newline at end of file diff --git a/_examples/complete/config/override-ingress-nginx.yaml b/_examples/complete/config/override-ingress-nginx.yaml new file mode 100644 index 0000000..dfce83d --- /dev/null +++ b/_examples/complete/config/override-ingress-nginx.yaml @@ -0,0 +1,23 @@ +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: In + values: + - "critical" + + +## Using limits and requests + 
+resources: + limits: + cpu: 150m + memory: 150Mi + requests: + cpu: 100m + memory: 90Mi + +podAnnotations: + co.elastic.logs/enabled: "true" \ No newline at end of file diff --git a/_examples/complete/config/reloader/override-reloader.yaml b/_examples/complete/config/reloader/override-reloader.yaml new file mode 100644 index 0000000..4fa56ca --- /dev/null +++ b/_examples/complete/config/reloader/override-reloader.yaml @@ -0,0 +1,20 @@ +reloader: + deployment: + # If you wish to run multiple replicas set reloader.enableHA = true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: In + values: + - "critical" + + resources: + limits: + cpu: "100m" + memory: "512Mi" + requests: + cpu: "10m" + memory: "128Mi" \ No newline at end of file diff --git a/_examples/complete/config/reloader/reloader_example.yaml b/_examples/complete/config/reloader/reloader_example.yaml new file mode 100644 index 0000000..fa6e179 --- /dev/null +++ b/_examples/complete/config/reloader/reloader_example.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx + annotations: + # configmap.reloader.stakater.com/reload: "test-configmap" + # secret.reloader.stakater.com/reload: "test-secret" + reloader.stakater.com/auto: "true" +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + env: + - name: TEST_ENV + valueFrom: + configMapKeyRef: + name: test-configmap + key: test_env + - name: TEST_PASS + valueFrom: + secretKeyRef: + name: test-secret + key: pass \ No newline at end of file diff --git a/_examples/complete/locals.tf b/_examples/complete/locals.tf new file mode 100644 index 0000000..fe12e21 --- /dev/null +++ b/_examples/complete/locals.tf @@ -0,0 +1,14 @@ +locals { + name = 
"helm-addons-test-1" + environment = "test" + region = "us-central1" + cluster_version = "1.28.3-gke.1203001" + gcp_project_id = "dev-env-3b53" + cluster_name = "test-cluster" + tags = { + Name = local.name + Environment = local.environment + GithubRepo = "terraform-helm-gke-addons" + GithubOrg = "clouddrove" + } +} \ No newline at end of file diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index a07c2e6..289652f 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -1,4 +1,214 @@ -# ------------------------------------------------------------------------------ -# Resources -# ------------------------------------------------------------------------------ -locals {} + +provider "google" { + project = local.gcp_project_id +} + +############################################################################### +# GCP NETWORKING RESOURCES +############################################################################### + + +module "vpc" { + source = "terraform-google-modules/network/google" + version = "~> 8.1" + + project_id = local.gcp_project_id + network_name = "${local.name}-vpc" + routing_mode = "GLOBAL" + + subnets = [ + { + subnet_name = "${local.name}-subnet-public-1" + subnet_ip = "10.10.10.0/24" + subnet_region = "us-central1" + }, + { + subnet_name = "${local.name}-subnet-private-1" + subnet_ip = "10.10.20.0/24" + subnet_region = "us-central1" + subnet_private_access = "true" + subnet_flow_logs = "true" + description = "This subnet has a description" + }, + { + subnet_name = "${local.name}-subnet-private-2" + subnet_ip = "10.10.30.0/24" + subnet_region = "us-central1" + subnet_private_access = "true" + subnet_flow_logs = "true" + description = "This subnet has used for GKE" + } + ] + + secondary_ranges = { + subnet-public-1 = [ + { + range_name = "${local.name}-subnet-private-1-secondary-01" + ip_cidr_range = "192.168.64.0/24" + }, + ], + subnet-public-2 = [ + { + range_name = 
"${local.name}-subnet-private-2-secondary-01" + ip_cidr_range = "192.168.128.0/24" + }, + ] + } + + routes = [ + { + name = "egress-internet" + description = "route through IGW to access internet" + destination_range = "0.0.0.0/0" + tags = "egress-inet" + next_hop_internet = "true" + }, + ] +} + +############################################################################### +# GCP GKE +############################################################################### + +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster" + version = "29.0.0" + project_id = local.gcp_project_id + name = local.cluster_name + region = local.region + kubernetes_version = local.cluster_version + zones = [] + network = module.vpc.network_name + subnetwork = "${local.name}-subnet-private-2" + ip_range_pods = "" + ip_range_services = "" + horizontal_pod_autoscaling = true + http_load_balancing = true + filestore_csi_driver = true + istio = true + create_service_account = true + remove_default_node_pool = true + disable_legacy_metadata_endpoints = false + deletion_protection = false + + node_pools = [ + + { + name = "general" + machine_type = "g1-small" + node_locations = "${local.region}-a" + min_count = 1 + max_count = 5 + local_ssd_count = 0 + spot = false + disk_size_gb = 10 + disk_type = "pd-standard" + image_type = "ubuntu_containerd" + enable_gcfs = false + enable_gvnic = false + logging_variant = "DEFAULT" + auto_repair = true + auto_upgrade = true + create_service_account = true + preemptible = false + initial_node_count = 1 + enable_node_pool_autoscaling = true + }, + { + name = "critical" + machine_type = "g1-small" + node_locations = "${local.region}-b" + min_count = 1 + max_count = 3 + local_ssd_count = 0 + spot = false + disk_size_gb = 10 + disk_type = "pd-standard" + image_type = "ubuntu_containerd" + enable_gcfs = false + enable_gvnic = false + logging_variant = "DEFAULT" + auto_repair = true + auto_upgrade = true + 
create_service_account = true + preemptible = false + initial_node_count = 1 + enable_node_pool_autoscaling = false + }, + ] + + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [ + local.tags.Name, + local.tags.Environment, + local.tags.GithubRepo, + local.tags.GithubOrg, + ] + + default-node-pool = [ + "default-node-pool", + ] + } +} + + +############################################################################### +# GCP ADDONS +############################################################################### + +module "addons" { + source = "../../" + + gke_cluster_name = module.gke.name + project_id = local.gcp_project_id + region = local.region + + cluster_autoscaler = false + reloader = false + ingress_nginx = false + certification_manager = false + keda = false + + # -- Path of override-values.yaml file + cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } + reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } + ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } + certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } + keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } + + # -- Override Helm Release attributes + cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs + reloader_extra_configs = var.reloader_extra_configs + ingress_nginx_extra_configs = var.ingress_nginx_extra_configs + certification_manager_extra_configs = var.certification_manager_extra_configs + keda_extra_configs = 
var.keda_extra_configs +} \ No newline at end of file diff --git a/_examples/complete/providers.tf b/_examples/complete/providers.tf new file mode 100644 index 0000000..2d52438 --- /dev/null +++ b/_examples/complete/providers.tf @@ -0,0 +1,25 @@ +# Retrieve an access token as the Terraform runner +data "google_client_config" "provider" {} + +data "google_container_cluster" "my_cluster" { + name = module.gke.name + location = "us-central1" +} + +provider "kubernetes" { + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) +} + +provider "helm" { + kubernetes { + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate + ) + } +} \ No newline at end of file diff --git a/_examples/complete/variables.tf b/_examples/complete/variables.tf index dc91c44..7f010a5 100644 --- a/_examples/complete/variables.tf +++ b/_examples/complete/variables.tf @@ -1,3 +1,33 @@ # ------------------------------------------------------------------------------ # Variables # ------------------------------------------------------------------------------ + +# ------------------ CLUSTER AUTOSCALER ---------------------------------------- +variable "cluster_autoscaler_extra_configs" { + type = any + default = {} +} + +# ------------------ NGINX INGRESS --------------------------------------------- +variable "ingress_nginx_extra_configs" { + type = any + default = {} +} + +# ------------------ CERTIFICATION-MANAGER ----------------------------------------------------- +variable "certification_manager_extra_configs" { + type = any + default = {} +} + +# ------------------ RELOADER 
-------------------------------------------------- +variable "reloader_extra_configs" { + type = any + default = {} +} + +# ------------------ KEDA ----------------------------------------------------- +variable "keda_extra_configs" { + type = any + default = {} +} diff --git a/_examples/complete/versions.tf b/_examples/complete/versions.tf new file mode 100644 index 0000000..09897b0 --- /dev/null +++ b/_examples/complete/versions.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.8" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.6" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.7.0" + } + local = { + source = "hashicorp/local" + version = ">= 2.0.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0" + } + } +} \ No newline at end of file diff --git a/addons/cert-manager/README.md b/addons/cert-manager/README.md new file mode 100644 index 0000000..be2ad09 --- /dev/null +++ b/addons/cert-manager/README.md @@ -0,0 +1,38 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [google](#requirement\_google) | >= 5.10.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [helm\_addon](#module\_helm\_addon) | ../helm | n/a | + +## Resources + +No resources. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [certification\_manager\_extra\_configs](#input\_certification\_manager\_extra\_configs) | Override attributes of helm\_release terraform resource | `any` | `{}` | no | +| [helm\_config](#input\_helm\_config) | Helm provider config for Cluster Autoscaler | `any` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [chart\_version](#output\_chart\_version) | n/a | +| [namespace](#output\_namespace) | n/a | +| [repository](#output\_repository) | n/a | + \ No newline at end of file diff --git a/addons/cert-manager/config/cert_manager.yaml b/addons/cert-manager/config/cert_manager.yaml new file mode 100644 index 0000000..2d47d71 --- /dev/null +++ b/addons/cert-manager/config/cert_manager.yaml @@ -0,0 +1,737 @@ +# Default values for cert-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # - name: "image-pull-secret" + + # Labels to apply to all resources + # Please note that this does not add labels to the resources created dynamically by the controllers. + # For these resources, you have to add the labels in the template in the cert-manager custom resource: + # eg. podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress + # ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress + # eg. 
secretTemplate in CertificateSpec + # ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec + commonLabels: {} + # team_name: dev + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + # Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + aggregateClusterRoles: true + + podSecurityPolicy: + enabled: false + useAppArmor: true + + # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose. + logLevel: 2 + + leaderElection: + # Override the namespace used for the leader election lease + namespace: "kube-system" + + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. This must be less than or equal to the + # lease duration. + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. + # retryPeriod: 15s + +installCRDs: false + +replicaCount: 1 + +strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + +podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + +# Comma separated list of feature gates that should be enabled on the +# controller pod. 
+featureGates: "" + +# The maximum number of challenges that can be scheduled as 'processing' at once +maxConcurrentChallenges: 60 + +image: + repository: quay.io/jetstack/cert-manager-controller + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-controller + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + pullPolicy: IfNotPresent + +# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer +# resources. By default, the same namespace as cert-manager is deployed within is +# used. This namespace will not be automatically created by the Helm chart. +clusterResourceNamespace: "" + +# This namespace allows you to define where the services will be installed into +# if not set then they will use the namespace of the release +# This is helpful when installing cert manager as a chart dependency (sub chart) +namespace: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + # Optional additional labels to add to the controller's ServiceAccount + # labels: {} + automountServiceAccountToken: true + +# Automounting API credentials for a particular pod +# automountServiceAccountToken: true + +# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted +enableCertificateOwnerRef: false + +# Used to configure options for the controller pod. +# This allows setting options that'd usually be provided via flags. 
+# An APIVersion and Kind must be specified in your values.yaml file. +# Flags will override options that are set here. +config: +# apiVersion: controller.config.cert-manager.io/v1alpha1 +# kind: ControllerConfiguration +# logging: +# verbosity: 2 +# format: text +# leaderElectionConfig: +# namespace: kube-system +# kubernetesAPIQPS: 9000 +# kubernetesAPIBurst: 9000 +# numberOfConcurrentWorkers: 200 +# featureGates: +# additionalCertificateOutputFormats: true +# experimentalCertificateSigningRequestControllers: true +# experimentalGatewayAPISupport: true +# serverSideApply: true +# literalCertificateSubject: true +# useCertificateRequestBasicConstraints: true + +# Setting Nameservers for DNS01 Self Check +# See: https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + +# Comma separated string with host and port of the recursive nameservers cert-manager should query +dns01RecursiveNameservers: "" + +# Forces cert-manager to only use the recursive nameservers for verification. +# Enabling this option could cause the DNS01 self check to take longer due to caching performed by the recursive nameservers +dns01RecursiveNameserversOnly: false + +# Additional command line flags to pass to cert-manager controller binary. 
+# To see all available flags run docker run quay.io/jetstack/cert-manager-controller: --help +extraArgs: [] + # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver + # - --controllers=*,-certificaterequests-approver + +extraEnv: [] +# - name: SOME_VAR +# value: 'some value' + +resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + +# Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +# Container Security Context to be set on the controller component container +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + +volumes: [] + +volumeMounts: [] + +# Optional additional annotations to add to the controller Deployment +# deploymentAnnotations: {} + +# Optional additional annotations to add to the controller Pods +# podAnnotations: {} + +podLabels: {} + +# Optional annotations to add to the controller Service +# serviceAnnotations: {} + +# Optional additional labels to add to the controller Service +# serviceLabels: {} + +# Optional DNS settings, useful if you have a public and private DNS zone for +# the same domain on Route 53. What follows is an example of ensuring +# cert-manager can access an ingress or DNS TXT records at all times. +# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# the cluster to work. 
+# podDnsPolicy: "None" +# podDnsConfig: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +nodeSelector: + kubernetes.io/os: linux + +ingressShim: {} + # defaultIssuerName: "" + # defaultIssuerKind: "" + # defaultIssuerGroup: "" + +prometheus: + enabled: true + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + annotations: {} + honorLabels: false + endpointAdditionalProperties: {} + +# Use these variables to configure the HTTP_PROXY environment variables +# http_proxy: "http://proxy:8080" +# https_proxy: "https://proxy:8080" +# no_proxy: 127.0.0.1,localhost + +# A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# A list of Kubernetes Tolerations, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# A list of Kubernetes TopologySpreadConstraints, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core +# for example: +# topologySpreadConstraints: +# - maxSkew: 2 +# topologyKey: topology.kubernetes.io/zone +# whenUnsatisfiable: ScheduleAnyway +# labelSelector: +# matchLabels: +# app.kubernetes.io/instance: cert-manager +# app.kubernetes.io/component: controller +topologySpreadConstraints: [] + +# LivenessProbe settings for the controller container of the controller Pod. 
+# +# Disabled by default, because the controller has a leader election mechanism +# which should cause it to exit if it is unable to renew its leader election +# record. +# LivenessProbe durations and thresholds are based on those used for the Kubernetes +# controller-manager. See: +# https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245 +livenessProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 15 + successThreshold: 1 + failureThreshold: 8 + +# enableServiceLinks indicates whether information about services should be +# injected into pod's environment variables, matching the syntax of Docker +# links. +enableServiceLinks: false + +webhook: + replicaCount: 1 + timeoutSeconds: 10 + + # Used to configure options for the webhook pod. + # This allows setting options that'd usually be provided via flags. + # An APIVersion and Kind must be specified in your values.yaml file. + # Flags will override options that are set here. + config: + # apiVersion: webhook.config.cert-manager.io/v1alpha1 + # kind: WebhookConfiguration + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. + # This should be uncommented and set as a default by the chart once we graduate + # the apiVersion of WebhookConfiguration past v1alpha1. 
+ # securePort: 10250 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the webhook component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + + # Container Security Context to be set on the webhook component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Optional additional annotations to add to the webhook Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the webhook Pods + # podAnnotations: {} + + # Optional additional annotations to add to the webhook Service + # serviceAnnotations: {} + + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration + # mutatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration + # validatingWebhookConfigurationAnnotations: {} + + # Additional command line flags to pass to cert-manager webhook binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook: --help + extraArgs: [] + # Path to a file containing a WebhookConfiguration object used to configure the webhook + # - --config= + + # Comma separated list of feature gates that should be enabled on the + # webhook pod. 
+ featureGates: "" + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + topologySpreadConstraints: [] + + # Optional additional labels to add to the Webhook Pods + podLabels: {} + + # Optional additional labels to add to the Webhook Service + serviceLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-webhook + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-webhook + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Optional additional labels to add to the webhook's ServiceAccount + # labels: {} + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + # The port that the webhook should listen on for requests. 
+ # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 + securePort: 10250 + + # Specifies if the webhook should be started in hostNetwork mode. + # + # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom + # CNI (such as calico), because control-plane managed by AWS cannot communicate + # with pods' IP CIDR and admission webhooks are not working + # + # Since the default port for the webhook conflicts with kubelet on the host + # network, `webhook.securePort` should be changed to an available port if + # running in hostNetwork mode. + hostNetwork: false + + # Specifies how the service should be handled. Useful if you want to expose the + # webhook to outside of the cluster. In some cases, the control plane cannot + # reach internal services. + serviceType: ClusterIP + # loadBalancerIP: + + # Overrides the mutating webhook and validating webhook so they reach the webhook + # service using the `url` field instead of a service. + url: {} + # host: + + # Enables default network policies for webhooks. + networkPolicy: + enabled: false + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + egress: + - ports: + - port: 80 + protocol: TCP + - port: 443 + protocol: TCP + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + # On OpenShift and OKD, the Kubernetes API server listens on + # port 6443. + - port: 6443 + protocol: TCP + to: + - ipBlock: + cidr: 0.0.0.0/0 + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. 
+ enableServiceLinks: false + +cainjector: + enabled: true + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the cainjector component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + + # Container Security Context to be set on the cainjector component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + + # Optional additional annotations to add to the cainjector Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the cainjector Pods + # podAnnotations: {} + + # Additional command line flags to pass to cert-manager cainjector binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector: --help + extraArgs: [] + # Enable profiling for cainjector + # - --enable-profiling=true + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + topologySpreadConstraints: [] + + # Optional additional labels to add to the CA Injector Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-cainjector + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-cainjector + + # Override the image tag to deploy by setting this variable. 
+ # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + # Optional additional labels to add to the cainjector's ServiceAccount + # labels: {} + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. + enableServiceLinks: false + +acmesolver: + image: + repository: quay.io/jetstack/cert-manager-acmesolver + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-acmesolver + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + +# This startupapicheck is a Helm post-install hook that waits for the webhook +# endpoints to become available. +# The check is implemented using a Kubernetes Job- if you are injecting mesh +# sidecar proxies into cert-manager pods, you probably want to ensure that they +# are not injected into this Job's pod. Otherwise the installation may time out +# due to the Job never being completed because the sidecar proxy does not exit. 
+# See https://github.com/cert-manager/cert-manager/pull/4414 for context. +startupapicheck: + enabled: true + + # Pod Security Context to be set on the startupapicheck component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # Container Security Context to be set on the controller component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Timeout for 'kubectl check api' command + timeout: 1m + + # Job backoffLimit + backoffLimit: 4 + + # Optional additional annotations to add to the startupapicheck Job + jobAnnotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "1" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Optional additional annotations to add to the startupapicheck Pods + # podAnnotations: {} + + # Additional command line flags to pass to startupapicheck binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl: --help + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the startupapicheck Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-ctl + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-ctl + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. 
+ # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + rbac: + # annotations for the startup API Check job RBAC and PSP resources + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + serviceAccount: + # Specifies whether a service account should be created + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + + # Optional additional annotations to add to the Job's ServiceAccount + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # Optional additional labels to add to the startupapicheck's ServiceAccount + # labels: {} + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. 
+ enableServiceLinks: false diff --git a/addons/cert-manager/locals.tf b/addons/cert-manager/locals.tf new file mode 100644 index 0000000..f927d11 --- /dev/null +++ b/addons/cert-manager/locals.tf @@ -0,0 +1,41 @@ +locals { + name = "cert-manager" + + default_helm_config = { + name = try(var.certification_manager_extra_configs.name, local.name) + chart = try(var.certification_manager_extra_configs.chart, local.name) + repository = try(var.certification_manager_extra_configs.repository, "https://charts.jetstack.io") + version = try(var.certification_manager_extra_configs.version, "1.13.1") + namespace = try(var.certification_manager_extra_configs.namespace, "cert-manager") + create_namespace = try(var.certification_manager_extra_configs.create_namespace, true) + description = "Certification manager helm Chart deployment configuration" + timeout = try(var.certification_manager_extra_configs.timeout, "600") + lint = try(var.certification_manager_extra_configs.lint, "false") + repository_key_file = try(var.certification_manager_extra_configs.repository_key_file, "") + repository_cert_file = try(var.certification_manager_extra_configs.repository_cert_file, "") + repository_username = try(var.certification_manager_extra_configs.repository_username, "") + repository_password = try(var.certification_manager_extra_configs.repository_password, "") + verify = try(var.certification_manager_extra_configs.verify, "false") + keyring = try(var.certification_manager_extra_configs.keyring, "") + disable_webhooks = try(var.certification_manager_extra_configs.disable_webhooks, "false") + reuse_values = try(var.certification_manager_extra_configs.reuse_values, "false") + reset_values = try(var.certification_manager_extra_configs.reset_values, "false") + force_update = try(var.certification_manager_extra_configs.force_update, "false") + recreate_pods = try(var.certification_manager_extra_configs.recreate_pods, "false") + cleanup_on_fail = 
try(var.certification_manager_extra_configs.cleanup_on_fail, "false") + max_history = try(var.certification_manager_extra_configs.max_history, "0") + atomic = try(var.certification_manager_extra_configs.atomic, "false") + skip_crds = try(var.certification_manager_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.certification_manager_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.certification_manager_extra_configs.disable_openapi_validation, "false") + wait = try(var.certification_manager_extra_configs.wait, "true") + wait_for_jobs = try(var.certification_manager_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.certification_manager_extra_configs.dependency_update, "false") + replace = try(var.certification_manager_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config + ) +} \ No newline at end of file diff --git a/addons/cert-manager/main.tf b/addons/cert-manager/main.tf new file mode 100644 index 0000000..83b8786 --- /dev/null +++ b/addons/cert-manager/main.tf @@ -0,0 +1,4 @@ +module "helm_addon" { + source = "../helm" + helm_config = local.helm_config +} \ No newline at end of file diff --git a/addons/cert-manager/output.tf b/addons/cert-manager/output.tf new file mode 100644 index 0000000..a3832b1 --- /dev/null +++ b/addons/cert-manager/output.tf @@ -0,0 +1,11 @@ +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/cert-manager/variables.tf b/addons/cert-manager/variables.tf new file mode 100644 index 0000000..0c6a012 --- /dev/null +++ b/addons/cert-manager/variables.tf @@ -0,0 +1,12 @@ +variable "helm_config" { + description = "Helm provider config for Cluster Autoscaler" + type = any + default = {} +} + +variable 
"certification_manager_extra_configs" { + description = "Override attributes of helm_release terraform resource" + type = any + default = {} +} + diff --git a/addons/cert-manager/versions.tf b/addons/cert-manager/versions.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/addons/cert-manager/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/addons/cluster-autoscaler/README.md b/addons/cluster-autoscaler/README.md new file mode 100644 index 0000000..38e1909 --- /dev/null +++ b/addons/cluster-autoscaler/README.md @@ -0,0 +1,44 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [google](#requirement\_google) | >= 5.10.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +| Name | Version | +|------|---------| +| [google](#provider\_google) | >= 5.10.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [helm\_addon](#module\_helm\_addon) | ../helm | n/a | + +## Resources + +| Name | Type | +|------|------| +| [google_project_iam_member.member-role](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/project_iam_member) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cluster\_autoscaler\_extra\_configs](#input\_cluster\_autoscaler\_extra\_configs) | Override attributes of helm\_release terraform resource | `any` | `{}` | no | +| [gke\_cluster\_name](#input\_gke\_cluster\_name) | n/a | `string` | `""` | no | +| [helm\_config](#input\_helm\_config) | Helm provider config for Cluster Autoscaler | `any` | `{}` | no | +| [project\_id](#input\_project\_id) | GCP project ID | `string` | n/a 
| yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [chart\_version](#output\_chart\_version) | n/a | +| [namespace](#output\_namespace) | n/a | +| [repository](#output\_repository) | n/a | + \ No newline at end of file diff --git a/addons/cluster-autoscaler/config/cluster_autoscaler.yaml b/addons/cluster-autoscaler/config/cluster_autoscaler.yaml new file mode 100644 index 0000000..15e3177 --- /dev/null +++ b/addons/cluster-autoscaler/config/cluster_autoscaler.yaml @@ -0,0 +1,399 @@ +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity -- Affinity for pod assignment +affinity: {} + +# additionalLabels -- Labels to add to each object of the chart. +additionalLabels: {} + +autoDiscovery: + # cloudProviders `aws`, `gce`, `azure`, `magnum`, `clusterapi` and `oci` are supported by auto-discovery at this time + # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup + + # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. + # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=azure`, using tags defined in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md#auto-discovery-setup. + # Enable autodiscovery for `cloudProvider=clusterapi`, for groups matching `autoDiscovery.labels`. + # Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. + # Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. + clusterName: # cluster.local + + # autoDiscovery.tags -- ASG tags to match, run through `tpl`. 
+ tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} + # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} + + # autoDiscovery.roles -- Magnum node group roles to match. + roles: + - worker + + # autoDiscovery.labels -- Cluster-API labels to match https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#configuring-node-group-auto-discovery + labels: [] + # - color: green + # - shape: circle +# autoscalingGroups -- For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: asg1
+# maxSize: 2
+# minSize: 1 +#
+autoscalingGroups: [] +# - name: asg1 +# maxSize: 2 +# minSize: 1 +# - name: asg2 +# maxSize: 2 +# minSize: 1 + +# autoscalingGroupsnamePrefix -- For GCE. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: ig01
+# maxSize: 10
+# minSize: 0 +#
+autoscalingGroupsnamePrefix: [] +# - name: ig01 +# maxSize: 10 +# minSize: 0 +# - name: ig02 +# maxSize: 10 +# minSize: 0 + +# awsAccessKeyID -- AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsAccessKeyID: "" + +# awsRegion -- AWS region (required if `cloudProvider=aws`) +awsRegion: us-east-1 + +# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsSecretAccessKey: "" + +# azureClientID -- Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientID: "" + +# azureClientSecret -- Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientSecret: "" + +# azureResourceGroup -- Azure resource group that the cluster is located. +# Required if `cloudProvider=azure` +azureResourceGroup: "" + +# azureSubscriptionID -- Azure subscription where the resources are located. +# Required if `cloudProvider=azure` +azureSubscriptionID: "" + +# azureTenantID -- Azure tenant where the resources are located. +# Required if `cloudProvider=azure` +azureTenantID: "" + +# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID, resource group, and azure AKS cluster name are set. You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set. +azureUseManagedIdentityExtension: false + +# azureUseWorkloadIdentityExtension -- Whether to use Azure's workload identity extension for credentials. See the project here: https://github.com/Azure/azure-workload-identity for more details. 
You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set. +azureUseWorkloadIdentityExtension: false + +# azureVMType -- Azure VM type. +azureVMType: "vmss" + +# cloudConfigPath -- Configuration file for cloud provider. +cloudConfigPath: "" + +# cloudProvider -- The cloud provider where the autoscaler runs. +# Currently only `gce`, `aws`, `azure`, `magnum` and `clusterapi` are supported. +# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. +# `magnum` for OpenStack Magnum, `clusterapi` for Cluster API. +# NOTE(review): this module targets GKE (google provider, gke_cluster_name), so default to `gce` rather than the upstream chart's `aws` — confirm the Terraform wrapper does not already override this. +cloudProvider: gce + +# clusterAPICloudConfigPath -- Path to kubeconfig for connecting to Cluster API Management Cluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or incluster-kubeconfig` +clusterAPICloudConfigPath: /etc/kubernetes/mgmt-kubeconfig + +# clusterAPIConfigMapsNamespace -- Namespace on the workload cluster to store Leader election and status configmaps +clusterAPIConfigMapsNamespace: "" + +# clusterAPIKubeconfigSecret -- Secret containing kubeconfig for connecting to Cluster API managed workload cluster +# Required if `cloudProvider=clusterapi` and `clusterAPIMode=kubeconfig-kubeconfig,kubeconfig-incluster or incluster-kubeconfig` +clusterAPIKubeconfigSecret: "" + +# clusterAPIMode -- Cluster API mode, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#connecting-cluster-autoscaler-to-cluster-api-management-and-workload-clusters +# Syntax: workloadClusterMode-ManagementClusterMode +# for `kubeconfig-kubeconfig`, `incluster-kubeconfig` and `single-kubeconfig` you always must mount the external kubeconfig using either `extraVolumeSecrets` or `extraMounts` and `extraVolumes` +# if you don't set `clusterAPIKubeconfigSecret` and thus use an in-cluster config or want to use a non-CAPI generated kubeconfig you must do so for the workload kubeconfig as well +clusterAPIMode: incluster-incluster # 
incluster-incluster, incluster-kubeconfig, kubeconfig-incluster, kubeconfig-kubeconfig, single-kubeconfig + +# clusterAPIWorkloadKubeconfigPath -- Path to kubeconfig for connecting to Cluster API managed workloadcluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or kubeconfig-incluster` +clusterAPIWorkloadKubeconfigPath: /etc/kubernetes/value + +# containerSecurityContext -- [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + +deployment: + # deployment.annotations -- Annotations to add to the Deployment object. + annotations: {} + +# dnsPolicy -- Defaults to `ClusterFirst`. Valid values are: +# `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. +# If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. +dnsPolicy: ClusterFirst + +# envFromConfigMap -- ConfigMap name to use as envFrom. +envFromConfigMap: "" + +# envFromSecret -- Secret name to use as envFrom. +envFromSecret: "" + +## Priorities Expander +# expanderPriorities -- The expanderPriorities is used if `extraArgs.expander` contains `priority` and expanderPriorities is also set with the priorities. +# If `extraArgs.expander` contains `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. +# See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md +expanderPriorities: {} + +# extraArgs -- Additional container arguments. +# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler +# parameters and their default values. +# Everything after the first _ will be ignored allowing the use of multi-string arguments. 
+extraArgs: + logtostderr: true + stderrthreshold: info + v: 4 + # write-status-configmap: true + # status-config-map-name: cluster-autoscaler-status + # leader-elect: true + # leader-elect-resource-lock: endpoints + # skip-nodes-with-local-storage: true + # expander: random + # scale-down-enabled: true + # balance-similar-node-groups: true + # min-replica-count: 0 + # scale-down-utilization-threshold: 0.5 + # scale-down-non-empty-candidates-count: 30 + # max-node-provision-time: 15m0s + # scan-interval: 10s + # scale-down-delay-after-add: 10m + # scale-down-delay-after-delete: 0s + # scale-down-delay-after-failure: 3m + # scale-down-unneeded-time: 10m + # skip-nodes-with-system-pods: true + # balancing-ignore-label_1: first-label-to-ignore + # balancing-ignore-label_2: second-label-to-ignore + +# extraEnv -- Additional container environment variables. +extraEnv: {} + +# extraEnvConfigMaps -- Additional container environment variables from ConfigMaps. +extraEnvConfigMaps: {} + +# extraEnvSecrets -- Additional container environment variables from Secrets. +extraEnvSecrets: {} + +# extraVolumeMounts -- Additional volumes to mount. +extraVolumeMounts: [] + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +# extraVolumes -- Additional volumes. +extraVolumes: [] + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + +# extraVolumeSecrets -- Additional volumes to mount from Secrets. +extraVolumeSecrets: {} + # autoscaler-vol: + # mountPath: /data/autoscaler/ + # custom-vol: + # name: custom-secret + # mountPath: /data/custom/ + # items: + # - key: subkey + # path: mypath + +# fullnameOverride -- String to fully override `cluster-autoscaler.fullname` template. +fullnameOverride: "" + +# hostNetwork -- Whether to expose network interfaces of the host machine to pods. 
+hostNetwork: false + +image: + # image.repository -- Image repository + repository: registry.k8s.io/autoscaling/cluster-autoscaler + # image.tag -- Image tag + tag: v1.28.2 + # image.pullPolicy -- Image pull policy + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # image.pullSecrets -- Image pull secrets + pullSecrets: [] + # - myRegistrKeySecretName + +# kubeTargetVersionOverride -- Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. +kubeTargetVersionOverride: "" + +# kwokConfigMapName -- configmap for configuring kwok provider +kwokConfigMapName: "kwok-provider-config" + +# magnumCABundlePath -- Path to the host's CA bundle, from `ca-file` in the cloud-config file. +magnumCABundlePath: "/etc/kubernetes/ca-bundle.crt" + +# magnumClusterName -- Cluster name or ID in Magnum. +# Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. +magnumClusterName: "" + +# nameOverride -- String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) +nameOverride: "" + +# nodeSelector -- Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. +nodeSelector: {} + +# podAnnotations -- Annotations to add to each pod. +podAnnotations: {} + +# podDisruptionBudget -- Pod disruption budget. +podDisruptionBudget: + maxUnavailable: 1 + # minAvailable: 2 + +# podLabels -- Labels to add to each pod. +podLabels: {} + +# priorityClassName -- priorityClassName +priorityClassName: "system-cluster-critical" + +# priorityConfigMapAnnotations -- Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. 
+priorityConfigMapAnnotations: {} + # key1: "value1" + # key2: "value2" + +## Custom PrometheusRule to be defined +## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart +## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions +prometheusRule: + # prometheusRule.enabled -- If true, creates a Prometheus Operator PrometheusRule. + enabled: false + # prometheusRule.additionalLabels -- Additional labels to be set in metadata. + additionalLabels: {} + # prometheusRule.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + # prometheusRule.interval -- How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). + interval: null + # prometheusRule.rules -- Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). + rules: [] + +rbac: + # rbac.create -- If `true`, create and use RBAC resources. + create: true + # rbac.pspEnabled -- If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. + # Must be used with `rbac.create` set to `true`. + pspEnabled: false + # rbac.clusterScoped -- if set to false will only provision RBAC to alter resources in the current namespace. Most useful for Cluster-API + clusterScoped: true + serviceAccount: + # rbac.serviceAccount.annotations -- Additional Service Account annotations. + annotations: {} + # rbac.serviceAccount.create -- If `true` and `rbac.create` is also true, a Service Account will be created. + create: true + # rbac.serviceAccount.name -- The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. + name: "" + # rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true + +# replicaCount -- Desired number of pods +replicaCount: 1 + +# resources -- Pod resource requests and limits. +resources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + +# revisionHistoryLimit -- The number of revisions to keep. +revisionHistoryLimit: 10 + +# securityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1001 + # runAsGroup: 1001 + +service: + # service.create -- If `true`, a Service will be created. + create: true + # service.annotations -- Annotations to add to service + annotations: {} + # service.labels -- Labels to add to service + labels: {} + # service.externalIPs -- List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. + externalIPs: [] + + # service.loadBalancerIP -- IP address to assign to load balancer (if supported). + loadBalancerIP: "" + # service.loadBalancerSourceRanges -- List of IP CIDRs allowed access to load balancer (if supported). + loadBalancerSourceRanges: [] + # service.servicePort -- Service port to expose. + servicePort: 8085 + # service.portName -- Name for service port. + portName: http + # service.type -- Type of service to create. + type: ClusterIP + +## Are you using Prometheus Operator? +serviceMonitor: + # serviceMonitor.enabled -- If true, creates a Prometheus Operator ServiceMonitor. + enabled: false + # serviceMonitor.interval -- Interval that Prometheus scrapes Cluster Autoscaler metrics. + interval: 10s + # serviceMonitor.namespace -- Namespace which Prometheus is running in. 
+ namespace: monitoring + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + # serviceMonitor.selector -- Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. + selector: + release: prometheus-operator + # serviceMonitor.path -- The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) + path: /metrics + # serviceMonitor.annotations -- Annotations to add to service monitor + annotations: {} + ## [RelabelConfig](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.RelabelConfig) + # serviceMonitor.metricRelabelings -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: {} + +# tolerations -- List of node taints to tolerate (requires Kubernetes >= 1.6). +tolerations: [] + +# topologySpreadConstraints -- You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. (requires Kubernetes >= 1.19). +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: cluster-autoscaler + +# updateStrategy -- [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) +updateStrategy: {} + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate + +# vpa -- Configure a VerticalPodAutoscaler for the cluster-autoscaler Deployment. +vpa: + # vpa.enabled -- If true, creates a VerticalPodAutoscaler. 
+ enabled: false + # vpa.updateMode -- [UpdateMode](https://github.com/kubernetes/autoscaler/blob/vertical-pod-autoscaler/v0.13.0/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go#L124) + updateMode: "Auto" + # vpa.containerPolicy -- [ContainerResourcePolicy](https://github.com/kubernetes/autoscaler/blob/vertical-pod-autoscaler/v0.13.0/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go#L159). The containerName is always set to the deployment's container name. This value is required if VPA is enabled. + containerPolicy: {} + +# secretKeyRefNameOverride -- Overrides the name of the Secret to use when loading the secretKeyRef for AWS and Azure env variables +secretKeyRefNameOverride: "" diff --git a/addons/cluster-autoscaler/locals.tf b/addons/cluster-autoscaler/locals.tf new file mode 100644 index 0000000..9807650 --- /dev/null +++ b/addons/cluster-autoscaler/locals.tf @@ -0,0 +1,40 @@ +locals { + name = "cluster-autoscaler" + + default_helm_config = { + name = try(var.cluster_autoscaler_extra_configs.name, local.name) + chart = try(var.cluster_autoscaler_extra_configs.chart, local.name) + repository = try(var.cluster_autoscaler_extra_configs.repository, "https://kubernetes.github.io/autoscaler") + version = try(var.cluster_autoscaler_extra_configs.version, "9.34.1") + namespace = try(var.cluster_autoscaler_extra_configs.namespace, "kube-system") + description = "Cluster Autoscaler helm Chart deployment configuration" + timeout = try(var.cluster_autoscaler_extra_configs.timeout, "600") + lint = try(var.cluster_autoscaler_extra_configs.lint, "false") + repository_key_file = try(var.cluster_autoscaler_extra_configs.repository_key_file, "") + repository_cert_file = try(var.cluster_autoscaler_extra_configs.repository_cert_file, "") + repository_username = try(var.cluster_autoscaler_extra_configs.repository_username, "") + repository_password = try(var.cluster_autoscaler_extra_configs.repository_password, "") + verify = 
try(var.cluster_autoscaler_extra_configs.verify, "false") + keyring = try(var.cluster_autoscaler_extra_configs.keyring, "") + disable_webhooks = try(var.cluster_autoscaler_extra_configs.disable_webhooks, "false") + reuse_values = try(var.cluster_autoscaler_extra_configs.reuse_values, "false") + reset_values = try(var.cluster_autoscaler_extra_configs.reset_values, "false") + force_update = try(var.cluster_autoscaler_extra_configs.force_update, "false") + recreate_pods = try(var.cluster_autoscaler_extra_configs.recreate_pods, "false") + cleanup_on_fail = try(var.cluster_autoscaler_extra_configs.cleanup_on_fail, "false") + max_history = try(var.cluster_autoscaler_extra_configs.max_history, "0") + atomic = try(var.cluster_autoscaler_extra_configs.atomic, "false") + skip_crds = try(var.cluster_autoscaler_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.cluster_autoscaler_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.cluster_autoscaler_extra_configs.disable_openapi_validation, "false") + wait = try(var.cluster_autoscaler_extra_configs.wait, "true") + wait_for_jobs = try(var.cluster_autoscaler_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.cluster_autoscaler_extra_configs.dependency_update, "false") + replace = try(var.cluster_autoscaler_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config, + ) +} \ No newline at end of file diff --git a/addons/cluster-autoscaler/main.tf b/addons/cluster-autoscaler/main.tf new file mode 100644 index 0000000..84f10f7 --- /dev/null +++ b/addons/cluster-autoscaler/main.tf @@ -0,0 +1,45 @@ +module "helm_addon" { + source = "../helm" + helm_config = local.helm_config + set_values = [ + { + name = "gcpRegion" + value = "us-central1" + }, + { + name = "autoDiscovery.clusterName" + value = var.gke_cluster_name + }, + { + name = "rbac.serviceAccount.create" + value = "true" + }, + { + name = 
"rbac.serviceAccount.name" + value = "${local.name}-sa" + }, + { + name = "cloudProvider" + value = "gce" + } + ] + + # -- workload identity Configurations + workload_identity_config = { + project_id = var.project_id + GCP_GSA_NAME = "${local.name}-sa" + GCP_KSA_NAME = "${local.name}-sa" + namespace = local.default_helm_config.namespace + } +} + +resource "google_project_iam_member" "member-role" { + for_each = toset([ + "roles/compute.instanceAdmin.v1", + "roles/iam.serviceAccountTokenCreator" + ]) + role = each.key + member = "serviceAccount:${local.name}-sa@${var.project_id}.iam.gserviceaccount.com" + project = var.project_id +} + diff --git a/addons/cluster-autoscaler/output.tf b/addons/cluster-autoscaler/output.tf new file mode 100644 index 0000000..a3832b1 --- /dev/null +++ b/addons/cluster-autoscaler/output.tf @@ -0,0 +1,11 @@ +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/cluster-autoscaler/variables.tf b/addons/cluster-autoscaler/variables.tf new file mode 100644 index 0000000..ba71399 --- /dev/null +++ b/addons/cluster-autoscaler/variables.tf @@ -0,0 +1,22 @@ +variable "helm_config" { + description = "Helm provider config for Cluster Autoscaler" + type = any + default = {} +} + +variable "gke_cluster_name" { + type = string + default = "" +} + +variable "cluster_autoscaler_extra_configs" { + description = "Override attributes of helm_release terraform resource" + type = any + default = {} +} + +variable "project_id" { + description = "GCP project ID" + type = string +} + diff --git a/addons/cluster-autoscaler/versions.tf b/addons/cluster-autoscaler/versions.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/addons/cluster-autoscaler/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + 
required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/addons/helm/main.tf b/addons/helm/main.tf new file mode 100644 index 0000000..417d570 --- /dev/null +++ b/addons/helm/main.tf @@ -0,0 +1,73 @@ +resource "helm_release" "addon" { + count = var.manage_via_gitops ? 0 : 1 + name = var.helm_config["name"] + repository = try(var.helm_config["repository"], null) + chart = var.helm_config["chart"] + version = try(var.helm_config["version"], null) + timeout = try(var.helm_config["timeout"], 600) + values = try(var.helm_config["values"], null) + create_namespace = length(var.workload_identity_config) > 0 ? false : try(var.helm_config["create_namespace"], false) + namespace = var.helm_config["namespace"] + lint = try(var.helm_config["lint"], false) + description = try(var.helm_config["description"], "") + repository_key_file = try(var.helm_config["repository_key_file"], "") + repository_cert_file = try(var.helm_config["repository_cert_file"], "") + repository_username = try(var.helm_config["repository_username"], "") + repository_password = try(var.helm_config["repository_password"], "") + verify = try(var.helm_config["verify"], false) + keyring = try(var.helm_config["keyring"], "") + disable_webhooks = try(var.helm_config["disable_webhooks"], false) + reuse_values = try(var.helm_config["reuse_values"], false) + reset_values = try(var.helm_config["reset_values"], false) + force_update = try(var.helm_config["force_update"], false) + recreate_pods = try(var.helm_config["recreate_pods"], false) + cleanup_on_fail = try(var.helm_config["cleanup_on_fail"], false) + max_history = try(var.helm_config["max_history"], 0) + atomic = try(var.helm_config["atomic"], false) + skip_crds = try(var.helm_config["skip_crds"], false) + render_subchart_notes = try(var.helm_config["render_subchart_notes"], true) + 
disable_openapi_validation = try(var.helm_config["disable_openapi_validation"], false) + wait = try(var.helm_config["wait"], true) + wait_for_jobs = try(var.helm_config["wait_for_jobs"], false) + dependency_update = try(var.helm_config["dependency_update"], false) + replace = try(var.helm_config["replace"], false) + + postrender { + binary_path = try(var.helm_config["postrender"], "") + } + + dynamic "set" { + iterator = each_item + for_each = try(var.helm_config["set"], null) != null ? distinct(concat(var.set_values, var.helm_config["set"])) : var.set_values + + content { + name = each_item.value.name + value = each_item.value.value + type = try(each_item.value.type, null) + } + } + + dynamic "set_sensitive" { + iterator = each_item + for_each = try(var.helm_config["set_sensitive"], null) != null ? concat(var.helm_config["set_sensitive"], var.set_sensitive_values) : var.set_sensitive_values + + content { + name = each_item.value.name + value = each_item.value.value + type = try(each_item.value.type, null) + } + } + depends_on = [module.workload_identity] +} + + +module "workload_identity" { + source = "../../modules/workload_identity" + + count = length(var.workload_identity_config) > 0 ? 1 : 0 + + project_id = try(var.workload_identity_config.project_id, null) + GCP_GSA_NAME = try(var.workload_identity_config.GCP_GSA_NAME, null) + GCP_KSA_NAME = try(var.workload_identity_config.GCP_KSA_NAME, null) + namespace = try(var.workload_identity_config.namespace, null) +} diff --git a/addons/helm/variables.tf b/addons/helm/variables.tf new file mode 100644 index 0000000..cb6a2c0 --- /dev/null +++ b/addons/helm/variables.tf @@ -0,0 +1,29 @@ +variable "helm_config" { + description = "Helm chart config. Repository and version required. 
See https://registry.terraform.io/providers/hashicorp/helm/latest/docs" + type = any + default = {} +} + +variable "set_values" { + description = "Forced set values" + type = any + default = [] +} + +variable "set_sensitive_values" { + description = "Forced set_sensitive values" + type = any + default = [] +} + +variable "manage_via_gitops" { + description = "Determines if the add-on should be managed via GitOps" + type = bool + default = false +} + +variable "workload_identity_config" { + description = "Input configuration for workload identity module" + type = any + default = {} +} diff --git a/addons/helm/versions.tf b/addons/helm/versions.tf new file mode 100644 index 0000000..389ddcd --- /dev/null +++ b/addons/helm/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } + } +} \ No newline at end of file diff --git a/addons/ingress-nginx/README.md b/addons/ingress-nginx/README.md new file mode 100644 index 0000000..dbf3acb --- /dev/null +++ b/addons/ingress-nginx/README.md @@ -0,0 +1,38 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [google](#requirement\_google) | >= 5.10.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [helm\_addon](#module\_helm\_addon) | ../helm | n/a | + +## Resources + +No resources. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [helm\_config](#input\_helm\_config) | Helm provider config for Cluster Autoscaler | `any` | `{}` | no | +| [ingress\_nginx\_extra\_configs](#input\_ingress\_nginx\_extra\_configs) | Nginx ingress extra config | `any` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [chart\_version](#output\_chart\_version) | n/a | +| [namespace](#output\_namespace) | n/a | +| [repository](#output\_repository) | n/a | + \ No newline at end of file diff --git a/addons/ingress-nginx/config/ingress_nginx.yaml b/addons/ingress-nginx/config/ingress_nginx.yaml new file mode 100644 index 0000000..e010a2d --- /dev/null +++ b/addons/ingress-nginx/config/ingress_nginx.yaml @@ -0,0 +1,1094 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +# -- Override the deployment namespace; defaults to .Release.Namespace +namespaceOverride: "" +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + enableAnnotationValidations: false + image: + ## Keep false as default for now! 
+ chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.9.5" + digest: sha256:b3aba22b1da80e7acfc52b115cae1d4c687172cbf2b742d5b502419c25ff340e + digestChroot: sha256:9a8d7b25a846a6461cd044b9aea9cf6cad972bcf2e64d9fd246c0279979aad2d + pullPolicy: IfNotPresent + runAsNonRoot: true + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: false + # -- Use an existing PSP instead of creating one + existingPsp: "" + # -- Configures the controller container name + containerName: controller + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + # -- Optionally customize the pod hostAliases. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - foo.local + # - bar.local + # - ip: 10.1.2.3 + # hostnames: + # - foo.remote + # - bar.remote + # -- Optionally customize the pod hostname. 
+ hostname: {} + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode="auto" + # Defaults to false + enableTopologyAwareRouting: false + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: false + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + # NetworkPolicy for controller component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' + electionID: "" + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + # -- For backwards compatibility with ingress.class annotation, use ingressClass. 
+ # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security context for controller pods + podSecurityContext: {} + # -- sysctls for controller pods + ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + # -- Security context for controller containers + containerSecurityContext: {} + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. + publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. 
+ namespaceSelector: "" + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + # -- Additional command line arguments to pass to Ingress-Nginx Controller + # E.g. to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + ## time-buckets: "0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10" + ## length-buckets: "10,20,30,40,50,60,70,80,90,100" + ## size-buckets: "10,100,1000,10000,100000,1e+06,1e+07" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with 
taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . 
}}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: topology.kubernetes.io/zone + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: kubernetes.io/hostname + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + # -- Address to bind the health check endpoint. 
+ # It is better to set this option to the internal node address + # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + replicaCount: 1 + # -- Minimum available pods set in PodDisruptionBudget. + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. + ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + annotations: {} + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + 
cooldownPeriod: 300 + # fallback: + # failureThreshold: 3 + # replicas: 11 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + service: + # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service. + enabled: true + external: + # -- Enable the external controller service or not. Useful for internal-only deployments. + enabled: true + # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service. + annotations: {} + # -- Labels to be added to both controller services. + labels: {} + # -- Type of the external controller service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: LoadBalancer + # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the external controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the external controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the external controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the external controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the external controller service. 
+ # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + # -- Enable the HTTP listener on both controller services or not. + enableHttp: true + # -- Enable the HTTPS listener on both controller services or not. + enableHttps: true + ports: + # -- Port the external HTTP listener is published with. + http: 80 + # -- Port the external HTTPS listener is published with. + https: 443 + targetPorts: + # -- Port of the ingress controller the external HTTP listener is mapped to. + http: http + # -- Port of the ingress controller the external HTTPS listener is mapped to. + https: https + # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the external HTTPS listener. 
If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + internal: + # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. + enabled: false + # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + annotations: {} + # -- Type of the internal controller service. + # Defaults to the value of `controller.service.type`. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: "" + # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the internal controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the internal controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the internal controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the internal controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + ports: {} + # -- Port the internal HTTP listener is published with. + # Defaults to the value of `controller.service.ports.http`. + # http: 80 + # -- Port the internal HTTPS listener is published with. + # Defaults to the value of `controller.service.ports.https`. + # https: 443 + + targetPorts: {} + # -- Port of the ingress controller the internal HTTP listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.http`. + # http: http + # -- Port of the ingress controller the internal HTTPS listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.https`. + # https: https + + # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range. 
+ # Example: + # udp: + # 53: 30053 + udp: {} + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + # -- Modules, which are mounted into the core nginx image. 
See values.yaml for a sample to add opentelemetry module + extraModules: [] + # - name: mytestmodule + # image: + # registry: registry.k8s.io + # image: ingress-nginx/mytestmodule + # ## for backwards compatibility consider setting the full image url via the repository value below + # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + # ## repository: + # tag: "v1.0.0" + # digest: "" + # distroless: false + # containerSecurityContext: + # runAsNonRoot: true + # runAsUser: + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # resources: {} + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + opentelemetry: + enabled: false + name: opentelemetry + image: + registry: registry.k8s.io + image: ingress-nginx/opentelemetry + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v20230721-3e2062ee5" + digest: sha256:13bee3f5223883d3ca62fee7309ad02d22ec00ff0d7033e3e9aca7a9f60fd472 + distroless: true + containerSecurityContext: + runAsNonRoot: true + # -- The image's default user, inherited from its base image `cgr.dev/chainguard/static`. + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + admissionWebhooks: + name: admission + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. 
+ ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. + enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + # -- Use an existing PSP instead of creating one + existingPsp: "" + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + createSecretJob: + name: create + # -- Security context for secret creation containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + patchWebhookJob: + name: patch + # -- Security context for webhook patch containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v20231011-8b53cabe0 + digest: sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80 + pullPolicy: 
IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + # NetworkPolicy for webhook patch + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + # -- Security context for secret creation & webhook patch pods + securityContext: {} + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + # default to be 5y + duration: "" + admissionCert: + # default to be 1y + duration: "" + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + metrics: + port: 10254 + portName: metrics + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + # -- Labels to be added to the metrics service resource + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + serviceMonitor: + enabled: false + additionalLabels: {} + annotations: {} + ## The label to use to retrieve the job name from. 
+ ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace or namespaceOverride only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # # By default a fake self-signed certificate is generated as default and + # # it is fine if it expires. If `--default-ssl-certificate` flag is used + # # and a valid certificate passed please do not filter for `host` label! + # # (i.e. 
delete `{host!="_"}` so also the default SSL certificate is + # # checked for expiration) + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. 
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + priorityClassName: "" +# -- Rollback limit +## +revisionHistoryLimit: 10 +## Default 404 backend +## +defaultBackend: + ## + enabled: false + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + runAsNonRoot: true + # nobody user -> uid 65534 + runAsUser: 65534 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: true + # -- Use an existing PSP instead of creating one + existingPsp: "" + extraArgs: {} + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + port: 8080 + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + 
affinity: {} + # -- Security context for default backend pods + podSecurityContext: {} + # -- Security context for default backend containers + containerSecurityContext: {} + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + replicaCount: 1 + minAvailable: 1 + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraConfigMaps: [] + ## Additional configmaps to the default backend pod. + # - name: my-extra-configmap-1 + # labels: + # type: config-1 + # data: + # extra_file_1.html: | + # + # - name: my-extra-configmap-2 + # labels: + # type: config-2 + # data: + # extra_file_2.html: | + # + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + # NetworkPolicy for default backend component. 
+ networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + service: + annotations: {} + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" +# -- (string) A base64-encoded Diffie-Hellman parameter. 
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: "" diff --git a/addons/ingress-nginx/locals.tf b/addons/ingress-nginx/locals.tf new file mode 100644 index 0000000..d5f7350 --- /dev/null +++ b/addons/ingress-nginx/locals.tf @@ -0,0 +1,40 @@ +locals { + name = "ingress-nginx" + + default_helm_config = { + name = try(var.ingress_nginx_extra_configs.name, local.name) + chart = try(var.ingress_nginx_extra_configs.chart, local.name) + repository = try(var.ingress_nginx_extra_configs.repository, "https://kubernetes.github.io/ingress-nginx") + version = try(var.ingress_nginx_extra_configs.version, "4.6.1") + namespace = try(var.ingress_nginx_extra_configs.namespace, "kube-system") + description = "Nginx Ingress helm Chart deployment configuration" + timeout = try(var.ingress_nginx_extra_configs.timeout, "600") + lint = try(var.ingress_nginx_extra_configs.lint, "false") + repository_key_file = try(var.ingress_nginx_extra_configs.repository_key_file, "") + repository_cert_file = try(var.ingress_nginx_extra_configs.repository_cert_file, "") + repository_username = try(var.ingress_nginx_extra_configs.repository_username, "") + repository_password = try(var.ingress_nginx_extra_configs.repository_password, "") + verify = try(var.ingress_nginx_extra_configs.verify, "false") + keyring = try(var.ingress_nginx_extra_configs.keyring, "") + disable_webhooks = try(var.ingress_nginx_extra_configs.disable_webhooks, "false") + reuse_values = try(var.ingress_nginx_extra_configs.reuse_values, "false") + reset_values = try(var.ingress_nginx_extra_configs.reset_values, "false") + force_update = try(var.ingress_nginx_extra_configs.force_update, "false") + recreate_pods = try(var.ingress_nginx_extra_configs.recreate_pods, "false") + cleanup_on_fail = try(var.ingress_nginx_extra_configs.cleanup_on_fail, "false") + max_history = 
try(var.ingress_nginx_extra_configs.max_history, "0") + atomic = try(var.ingress_nginx_extra_configs.atomic, "false") + skip_crds = try(var.ingress_nginx_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.ingress_nginx_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.ingress_nginx_extra_configs.disable_openapi_validation, "false") + wait = try(var.ingress_nginx_extra_configs.wait, "true") + wait_for_jobs = try(var.ingress_nginx_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.ingress_nginx_extra_configs.dependency_update, "false") + replace = try(var.ingress_nginx_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config, + ) +} \ No newline at end of file diff --git a/addons/ingress-nginx/main.tf b/addons/ingress-nginx/main.tf new file mode 100644 index 0000000..83b8786 --- /dev/null +++ b/addons/ingress-nginx/main.tf @@ -0,0 +1,4 @@ +module "helm_addon" { + source = "../helm" + helm_config = local.helm_config +} \ No newline at end of file diff --git a/addons/ingress-nginx/output.tf b/addons/ingress-nginx/output.tf new file mode 100644 index 0000000..a3832b1 --- /dev/null +++ b/addons/ingress-nginx/output.tf @@ -0,0 +1,11 @@ +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/ingress-nginx/variable.tf b/addons/ingress-nginx/variable.tf new file mode 100644 index 0000000..58f61fd --- /dev/null +++ b/addons/ingress-nginx/variable.tf @@ -0,0 +1,13 @@ +variable "helm_config" { + description = "Helm provider config for Cluster Autoscaler" + type = any + default = {} +} + +variable "ingress_nginx_extra_configs" { + description = "Nginx ingress extra config" + type = any + default = {} +} + + diff --git a/addons/ingress-nginx/version.tf 
b/addons/ingress-nginx/version.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/addons/ingress-nginx/version.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/addons/keda/README.md b/addons/keda/README.md new file mode 100644 index 0000000..2886e6e --- /dev/null +++ b/addons/keda/README.md @@ -0,0 +1,38 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [google](#requirement\_google) | >= 5.10.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [helm\_addon](#module\_helm\_addon) | ../helm | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [helm\_config](#input\_helm\_config) | Helm provider config for Keda | `any` | `{}` | no | +| [keda\_extra\_configs](#input\_keda\_extra\_configs) | Override attributes of helm\_release terraform resource | `any` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [chart\_version](#output\_chart\_version) | n/a | +| [namespace](#output\_namespace) | n/a | +| [repository](#output\_repository) | n/a | + \ No newline at end of file diff --git a/addons/keda/config/keda.yaml b/addons/keda/config/keda.yaml new file mode 100644 index 0000000..82d6fb8 --- /dev/null +++ b/addons/keda/config/keda.yaml @@ -0,0 +1,634 @@ +# Default values for keda. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + keda: + # -- Image name of KEDA operator + repository: ghcr.io/kedacore/keda + # -- Image tag of KEDA operator. 
Optional, given app version of Helm chart is used by default + tag: "" + metricsApiServer: + # -- Image name of KEDA Metrics API Server + repository: ghcr.io/kedacore/keda-metrics-apiserver + # -- Image tag of KEDA Metrics API Server. Optional, given app version of Helm chart is used by default + tag: "" + webhooks: + # -- Image name of KEDA admission-webhooks + repository: ghcr.io/kedacore/keda-admission-webhooks + # -- Image tag of KEDA admission-webhooks . Optional, given app version of Helm chart is used by default + tag: "" + # -- Image pullPolicy for all KEDA components + pullPolicy: Always + +# -- Kubernetes cluster domain +clusterDomain: cluster.local + +crds: + # -- Defines whether the KEDA CRDs have to be installed or not. + install: true + +# -- Defines Kubernetes namespaces to watch to scale their workloads. Default watches all namespaces +watchNamespace: "" + +# -- Name of secret to use to pull images to use to pull Docker images +imagePullSecrets: [] + +operator: + # -- Name of the KEDA operator + name: keda-operator + # -- Capability to configure the number of replicas for KEDA operator. + # While you can run more replicas of our operator, only one operator instance will be the leader and serving traffic. + # You can run multiple replicas, but they will not improve the performance of KEDA, it could only reduce downtime during a failover. + # Learn more in [our documentation](https://keda.sh/docs/latest/operate/cluster/#high-availability). + replicaCount: 1 + # -- [Affinity] for pod scheduling for KEDA operator. Takes precedence over the `affinity` field + affinity: {} + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - keda-operator + # topologyKey: "kubernetes.io/hostname" + +metricsServer: + # -- Capability to configure the number of replicas for KEDA metric server. 
+ # While you can run more replicas of our metric server, only one instance will used and serve traffic. + # You can run multiple replicas, but they will not improve the performance of KEDA, it could only reduce downtime during a failover. + # Learn more in [our documentation](https://keda.sh/docs/latest/operate/cluster/#high-availability). + replicaCount: 1 + # use ClusterFirstWithHostNet if `useHostNetwork: true` https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + # -- Defined the DNS policy for the metric server + dnsPolicy: ClusterFirst + # -- Enable metric server to use host network + useHostNetwork: false + # -- [Affinity] for pod scheduling for Metrics API Server. Takes precedence over the `affinity` field + affinity: {} + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - keda-operator-metrics-apiserver + # topologyKey: "kubernetes.io/hostname" + +webhooks: + # -- Enable admission webhooks (this feature option will be removed in v2.12) + enabled: true + # -- Port number to use for KEDA admission webhooks. Default is 9443. + port: "" + # -- Port number to use for KEDA admission webhooks health probe + healthProbePort: 8081 + # -- Enable webhook to use host network, this is required on EKS with custom CNI + useHostNetwork: false + # -- Name of the KEDA admission webhooks + name: keda-admission-webhooks + # -- Capability to configure the number of replicas for KEDA admission webhooks + replicaCount: 1 + # -- [Affinity] for pod scheduling for KEDA admission webhooks. 
Takes precedence over the `affinity` field + affinity: {} + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - keda-operator + # topologyKey: "kubernetes.io/hostname" + + # -- [Failure policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) to use with KEDA admission webhooks + failurePolicy: Ignore + +upgradeStrategy: + # -- Capability to configure [Deployment upgrade strategy] for operator + operator: {} + # type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + # maxSurge: 1 + + # -- Capability to configure [Deployment upgrade strategy] for Metrics Api Server + metricsApiServer: {} + # type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + # maxSurge: 1 + + # -- Capability to configure [Deployment upgrade strategy] for Admission webhooks + webhooks: {} + # type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + # maxSurge: 1 + +podDisruptionBudget: + # -- Capability to configure [Pod Disruption Budget] + operator: {} + # minAvailable: 1 + # maxUnavailable: 1 + + # -- Capability to configure [Pod Disruption Budget] + metricServer: {} + # minAvailable: 1 + # maxUnavailable: 1 + + # -- Capability to configure [Pod Disruption Budget] + webhooks: {} + # minAvailable: 1 + # maxUnavailable: 1 + +# -- Custom labels to add into metadata +additionalLabels: + {} + # foo: bar + +# -- Custom annotations to add into metadata +additionalAnnotations: + {} + # foo: bar + +podAnnotations: + # -- Pod annotations for KEDA operator + keda: {} + # -- Pod annotations for KEDA Metrics Adapter + metricsAdapter: {} + # -- Pod annotations for KEDA Admission webhooks + webhooks: {} +podLabels: + # -- Pod labels for KEDA operator + keda: {} + # -- Pod labels for KEDA Metrics Adapter + metricsAdapter: {} + # -- Pod labels for KEDA Admission webhooks + webhooks: {} + +rbac: + # -- Specifies whether 
RBAC should be used + create: true + # -- Specifies whether RBAC for CRDs should be [aggregated](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) to default roles (view, edit, admin) + aggregateToDefaultRoles: false + +serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: keda-operator + # -- Specifies whether a service account should automount API-Credentials + automountServiceAccountToken: true + # -- Annotations to add to the service account + annotations: {} + +podIdentity: + activeDirectory: + # Set to the value of the Azure Active Directory Pod Identity + # See https://keda.sh/docs/concepts/authentication/#azure-pod-identity + # This will be set as a label on the KEDA Pod(s) + # -- Identity in Azure Active Directory to use for Azure pod identity + identity: "" + azureWorkload: + # -- Set to true to enable Azure Workload Identity usage. + # See https://keda.sh/docs/concepts/authentication/#azure-workload-identity + # This will be set as a label on the KEDA service account. + enabled: false + # Set to the value of the Azure Active Directory Client and Tenant Ids + # respectively. These will be set as annotations on the KEDA service account. + # -- Id of Azure Active Directory Client to use for authentication with Azure Workload Identity. ([docs](https://keda.sh/docs/concepts/authentication/#azure-workload-identity)) + clientId: "" + # -- Id Azure Active Directory Tenant to use for authentication with for Azure Workload Identity. ([docs](https://keda.sh/docs/concepts/authentication/#azure-workload-identity)) + tenantId: "" + # Set to the value of the service account token expiration duration. + # This will be set as an annotation on the KEDA service account. + # -- Duration in seconds to automatically expire tokens for the service account. 
([docs](https://keda.sh/docs/concepts/authentication/#azure-workload-identity)) + tokenExpiration: 3600 + aws: + irsa: + # -- Specifies whether [AWS IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) is to be enabled or not. + enabled: false + # -- Sets the token audience for IRSA. + # This will be set as an annotation on the KEDA service account. + audience: "sts.amazonaws.com" + # -- Set to the value of the ARN of an IAM role with a web identity provider. + # This will be set as an annotation on the KEDA service account. + roleArn: "" + # -- Sets the use of an STS regional endpoint instead of global. + # Recommended to use regional endpoint in almost all cases. + # This will be set as an annotation on the KEDA service account. + stsRegionalEndpoints: "true" + # -- Set to the value of the service account token expiration duration. + # This will be set as an annotation on the KEDA service account. + tokenExpiration: 86400 + gcp: + # -- Set to true to enable GCP Workload Identity. + # See https://keda.sh/docs/2.10/authentication-providers/gcp-workload-identity/ + # This will be set as a annotation on the KEDA service account. + enabled: false + # -- GCP IAM Service Account Email which you would like to use for workload identity. + gcpIAMServiceAccount: "" + +# -- Set this if you are using an external scaler and want to communicate +# over TLS (recommended). This variable holds the name of the secret that +# will be mounted to the /grpccerts path on the Pod +grpcTLSCertsSecret: "" + +# -- Set this if you are using HashiCorp Vault and want to communicate +# over TLS (recommended). This variable holds the name of the secret that +# will be mounted to the /vault path on the Pod +hashiCorpVaultTLS: "" + +logging: + operator: + # -- Logging level for KEDA Operator. 
+ # allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string + level: info + # -- Logging format for KEDA Operator. + # allowed values: `json` or `console` + format: console + # -- Logging time encoding for KEDA Operator. + # allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` + timeEncoding: rfc3339 + metricServer: + # -- Logging level for Metrics Server. + # allowed values: `0` for info, `4` for debug, or an integer value greater than 0, specified as string + level: 0 + + webhooks: + # -- Logging level for KEDA Operator. + # allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string + level: info + # -- Logging format for KEDA Admission webhooks. + # allowed values: `json` or `console` + format: console + # -- Logging time encoding for KEDA Operator. + # allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` + timeEncoding: rfc3339 + +# -- [Security context] for all containers +# @default -- [See below](#KEDA-is-secure-by-default) +securityContext: + # -- [Security context] of the operator container + # @default -- [See below](#KEDA-is-secure-by-default) + operator: + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + # -- [Security context] of the metricServer container + # @default -- [See below](#KEDA-is-secure-by-default) + metricServer: + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + # -- [Security context] of the admission webhooks container + # @default -- [See below](#KEDA-is-secure-by-default) + webhooks: + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + +# -- [Pod security context] for all pods +# @default -- [See below](#KEDA-is-secure-by-default) 
+podSecurityContext: + # -- [Pod security context] of the KEDA operator pod + # @default -- [See below](#KEDA-is-secure-by-default) + operator: + runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + + # -- [Pod security context] of the KEDA metrics apiserver pod + # @default -- [See below](#KEDA-is-secure-by-default) + metricServer: + runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + + # -- [Pod security context] of the KEDA admission webhooks + # @default -- [See below](#KEDA-is-secure-by-default) + webhooks: + runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + +service: + # -- KEDA Metric Server service type + type: ClusterIP + # -- HTTPS port for KEDA Metric Server service + portHttps: 443 + # -- HTTPS port for KEDA Metric Server container + portHttpsTarget: 6443 + # -- Annotations to add the KEDA Metric Server service + annotations: {} + +# We provides the default values that we describe in our docs: +# https://keda.sh/docs/latest/operate/cluster/ +# If you want to specify the resources (or totally remove the defaults), change or comment the following +# lines, adjust them as necessary, or simply add the curly braces after 'operator' and/or 'metricServer' +# and remove/comment the default values +resources: + # -- Manage [resource request & limits] of KEDA operator pod + operator: + limits: + cpu: 1 + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + # -- Manage [resource request & limits] of KEDA metrics apiserver pod + metricServer: + limits: + cpu: 1 + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + # -- Manage [resource request & limits] of KEDA admission webhooks pod + webhooks: + limits: + cpu: 50m + memory: 100Mi + requests: + cpu: 10m + memory: 10Mi +# -- Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) +nodeSelector: {} +# -- Tolerations for pod scheduling 
([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) +tolerations: [] + +topologySpreadConstraints: + # -- [Pod Topology Constraints] of KEDA operator pod + operator: [] + # -- [Pod Topology Constraints] of KEDA metrics apiserver pod + metricsServer: [] + # -- [Pod Topology Constraints] of KEDA admission webhooks pod + webhooks: [] + +# -- [Affinity] for pod scheduling for both KEDA operator and Metrics API Server +affinity: {} + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - keda-operator + # - keda-operator-metrics-apiserver + # topologyKey: "kubernetes.io/hostname" + +# -- priorityClassName for all KEDA components +priorityClassName: "" + +## The default HTTP timeout in milliseconds that KEDA should use +## when making requests to external services. Removing this defaults to a +## reasonable default +http: + # -- The default HTTP timeout to use for all scalers that use raw HTTP clients (some scalers use SDKs to access target services. These have built-in HTTP clients, and the timeout does not necessarily apply to them) + timeout: 3000 + keepAlive: + # -- Enable HTTP connection keep alive + enabled: true + # -- The minimum TLS version to use for all scalers that use raw HTTP clients (some scalers use SDKs to access target services. These have built-in HTTP clients, and this value does not necessarily apply to them) + minTlsVersion: TLS12 + +## Extra KEDA Operator and Metrics Adapter container arguments +extraArgs: + # -- Additional KEDA Operator container arguments + keda: {} + # -- Additional Metrics Adapter container arguments + metricsAdapter: {} + +# -- Additional environment variables that will be passed onto all KEDA components +env: [] +# - name: ENV_NAME +# value: 'ENV-VALUE' + +# Extra volumes and volume mounts for the deployment. Optional. 
+volumes: + keda: + # -- Extra volumes for KEDA deployment + extraVolumes: [] + # -- Extra volume mounts for KEDA deployment + extraVolumeMounts: [] + + metricsApiServer: + # -- Extra volumes for metric server deployment + extraVolumes: [] + # -- Extra volume mounts for metric server deployment + extraVolumeMounts: [] + + webhooks: + # -- Extra volumes for admission webhooks deployment + extraVolumes: [] + # -- Extra volume mounts for admission webhooks deployment + extraVolumeMounts: [] + +prometheus: + metricServer: + # -- Enable metric server Prometheus metrics expose + enabled: false + # -- HTTP port used for exposing metrics server prometheus metrics + port: 8080 + # -- HTTP port name for exposing metrics server prometheus metrics + portName: metrics + serviceMonitor: + # -- Enables ServiceMonitor creation for the Prometheus Operator + enabled: false + # -- JobLabel selects the label from the associated Kubernetes service which will be used as the job label for all metrics. [ServiceMonitor Spec] + jobLabel: "" + # -- TargetLabels transfers labels from the Kubernetes `Service` onto the created metrics + targetLabels: [] + # -- PodTargetLabels transfers labels on the Kubernetes `Pod` onto the created metrics + podTargetLabels: [] + # -- Name of the service port this endpoint refers to. Mutually exclusive with targetPort + port: metrics + # -- Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port + targetPort: "" + # -- Interval at which metrics should be scraped If not specified Prometheus’ global scrape interval is used. + interval: "" + # -- Timeout after which the scrape is ended If not specified, the Prometheus global scrape timeout is used unless it is less than Interval in which the latter is used + scrapeTimeout: "" + # -- DEPRECATED. List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). 
[RelabelConfig Spec] + relabellings: [] + # -- List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). [RelabelConfig Spec] + relabelings: [] + # -- Additional labels to add for metric server using ServiceMonitor crd (prometheus operator) + additionalLabels: {} + podMonitor: + # -- Enables PodMonitor creation for the Prometheus Operator + enabled: false + # -- Scraping interval for metric server using podMonitor crd (prometheus operator) + interval: "" + # -- Scraping timeout for metric server using podMonitor crd (prometheus operator) + scrapeTimeout: "" + # -- Scraping namespace for metric server using podMonitor crd (prometheus operator) + namespace: "" + # -- Additional labels to add for metric server using podMonitor crd (prometheus operator) + additionalLabels: {} + # -- List of expressions that define custom relabeling rules for metric server podMonitor crd (prometheus operator) + relabelings: [] + operator: + # -- Enable KEDA Operator prometheus metrics expose + enabled: false + # -- Port used for exposing KEDA Operator prometheus metrics + port: 8080 + serviceMonitor: + # -- Enables ServiceMonitor creation for the Prometheus Operator + enabled: false + # -- JobLabel selects the label from the associated Kubernetes service which will be used as the job label for all metrics. [ServiceMonitor Spec] + jobLabel: "" + # -- TargetLabels transfers labels from the Kubernetes `Service` onto the created metrics + targetLabels: [] + # -- PodTargetLabels transfers labels on the Kubernetes `Pod` onto the created metrics + podTargetLabels: [] + # -- Name of the service port this endpoint refers to. Mutually exclusive with targetPort + port: metrics + # -- Name or number of the target port of the Pod behind the Service, + # the port must be specified with container port property. 
Mutually exclusive with port + targetPort: "" + # -- Interval at which metrics should be scraped If not specified Prometheus’ global scrape interval is used. + interval: "" + # -- Timeout after which the scrape is ended If not specified, the Prometheus global scrape timeout is used unless it is less than Interval in which the latter is used + scrapeTimeout: "" + # -- DEPRECATED. List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). [RelabelConfig Spec] + relabellings: [] + # -- List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). [RelabelConfig Spec] + relabelings: [] + # -- Additional labels to add for metric server using ServiceMonitor crd (prometheus operator) + additionalLabels: {} + podMonitor: + # -- Enables PodMonitor creation for the Prometheus Operator + enabled: false + # -- Scraping interval for KEDA Operator using podMonitor crd (prometheus operator) + interval: "" + # -- Scraping timeout for KEDA Operator using podMonitor crd (prometheus operator) + scrapeTimeout: "" + # -- Scraping namespace for KEDA Operator using podMonitor crd (prometheus operator) + namespace: "" + # -- Additional labels to add for KEDA Operator using podMonitor crd (prometheus operator) + additionalLabels: {} + # -- List of expressions that define custom relabeling rules for KEDA Operator podMonitor crd (prometheus operator) + relabelings: [] + prometheusRules: + # -- Enables PrometheusRules creation for the Prometheus Operator + enabled: false + # -- Scraping namespace for KEDA Operator using prometheusRules crd (prometheus operator) + namespace: "" + # -- Additional labels to add for KEDA Operator using prometheusRules crd (prometheus operator) + additionalLabels: {} + # -- Additional alerts to add for KEDA Operator using prometheusRules crd (prometheus operator) + alerts: + [] + # - alert: KedaScalerErrors + # annotations: + # description: Keda 
scaledObject {{ $labels.scaledObject }} is experiencing errors with {{ $labels.scaler }} scaler + # summary: Keda Scaler {{ $labels.scaler }} Errors + # expr: sum by ( scaledObject , scaler) (rate(keda_metrics_adapter_scaler_errors[2m])) > 0 + # for: 2m + # labels: + webhooks: + # -- Enable KEDA admission webhooks prometheus metrics expose + enabled: false + # -- Port used for exposing KEDA admission webhooks prometheus metrics + port: 8080 + serviceMonitor: + # -- Enables ServiceMonitor creation for the Prometheus webhooks + enabled: false + # -- jobLabel selects the label from the associated Kubernetes service which will be used as the job label for all metrics. [ServiceMonitor Spec] + jobLabel: "" + # -- TargetLabels transfers labels from the Kubernetes `Service` onto the created metrics + targetLabels: [] + # -- PodTargetLabels transfers labels on the Kubernetes `Pod` onto the created metrics + podTargetLabels: [] + # -- Name of the service port this endpoint refers to. Mutually exclusive with targetPort + port: metrics + # -- Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port + targetPort: "" + # -- Interval at which metrics should be scraped If not specified Prometheus’ global scrape interval is used. + interval: "" + # -- Timeout after which the scrape is ended If not specified, the Prometheus global scrape timeout is used unless it is less than Interval in which the latter is used + scrapeTimeout: "" + # -- DEPRECATED. List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). [RelabelConfig Spec] + relabellings: [] + # -- List of expressions that define custom relabeling rules for metric server ServiceMonitor crd (prometheus operator). 
[RelabelConfig Spec] + relabelings: [] + # -- Additional labels to add for metric server using ServiceMonitor crd (prometheus operator) + additionalLabels: {} + prometheusRules: + # -- Enables PrometheusRules creation for the Prometheus Operator + enabled: false + # -- Scraping namespace for KEDA admission webhooks using prometheusRules crd (prometheus operator) + namespace: "" + # -- Additional labels to add for KEDA admission webhooks using prometheusRules crd (prometheus operator) + additionalLabels: {} + # -- Additional alerts to add for KEDA admission webhooks using prometheusRules crd (prometheus operator) + alerts: [] + +certificates: + # -- Enables the self generation for KEDA TLS certificates inside KEDA operator + autoGenerated: true + # -- Secret name to be mounted with KEDA TLS certificates + secretName: kedaorg-certs + # -- Path where KEDA TLS certificates are mounted + mountPath: /certs + certManager: + # -- Enables Cert-manager for certificate management + enabled: false + # -- Generates a self-signed CA with Cert-manager. 
+ # If generateCA is false, the secret with the CA + # has to be annotated with `cert-manager.io/allow-direct-injection: "true"` + generateCA: true + # -- Secret name where the CA is stored (generatedby cert-manager or user given) + caSecretName: "kedaorg-ca" + # -- Add labels/annotations to secrets created by Certificate resources + # [docs](https://cert-manager.io/docs/usage/certificate/#creating-certificate-resources) + secretTemplate: {} + # annotations: + # my-secret-annotation-1: "foo" + # my-secret-annotation-2: "bar" + # labels: + # my-secret-label: foo + +permissions: + metricServer: + restrict: + # -- Restrict Secret Access for Metrics Server + secret: false + operator: + restrict: + # -- Restrict Secret Access for KEDA operator + secret: false + +# -- Array of extra K8s manifests to deploy +extraObjects: [] + # - apiVersion: keda.sh/v1alpha1 + # kind: ClusterTriggerAuthentication + # metadata: + # name: aws-credentials + # namespace: keda + # spec: + # podIdentity: + # provider: aws-eks + +# -- Capability to turn on/off ASCII art in Helm installation notes +asciiArt: true \ No newline at end of file diff --git a/addons/keda/locals.tf b/addons/keda/locals.tf new file mode 100644 index 0000000..5379eed --- /dev/null +++ b/addons/keda/locals.tf @@ -0,0 +1,41 @@ +locals { + name = "keda" + + default_helm_config = { + name = try(var.keda_extra_configs.name, local.name) + chart = try(var.keda_extra_configs.chart, local.name) + repository = try(var.keda_extra_configs.repository, "https://kedacore.github.io/charts") + version = try(var.keda_extra_configs.version, "2.11.2") + namespace = try(var.keda_extra_configs.namespace, "keda") + create_namespace = try(var.keda_extra_configs.create_namespace, true) + description = "Keda helm Chart deployment configuration" + timeout = try(var.keda_extra_configs.timeout, "600") + lint = try(var.keda_extra_configs.lint, "false") + repository_key_file = try(var.keda_extra_configs.repository_key_file, "") + repository_cert_file 
= try(var.keda_extra_configs.repository_cert_file, "") + repository_username = try(var.keda_extra_configs.repository_username, "") + repository_password = try(var.keda_extra_configs.repository_password, "") + verify = try(var.keda_extra_configs.verify, "false") + keyring = try(var.keda_extra_configs.keyring, "") + disable_webhooks = try(var.keda_extra_configs.disable_webhooks, "false") + reuse_values = try(var.keda_extra_configs.reuse_values, "false") + reset_values = try(var.keda_extra_configs.reset_values, "false") + force_update = try(var.keda_extra_configs.force_update, "false") + recreate_pods = try(var.keda_extra_configs.recreate_pods, "false") + cleanup_on_fail = try(var.keda_extra_configs.cleanup_on_fail, "false") + max_history = try(var.keda_extra_configs.max_history, "0") + atomic = try(var.keda_extra_configs.atomic, "false") + skip_crds = try(var.keda_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.keda_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.keda_extra_configs.disable_openapi_validation, "false") + wait = try(var.keda_extra_configs.wait, "true") + wait_for_jobs = try(var.keda_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.keda_extra_configs.dependency_update, "false") + replace = try(var.keda_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config + ) +} \ No newline at end of file diff --git a/addons/keda/main.tf b/addons/keda/main.tf new file mode 100644 index 0000000..83b8786 --- /dev/null +++ b/addons/keda/main.tf @@ -0,0 +1,4 @@ +module "helm_addon" { + source = "../helm" + helm_config = local.helm_config +} \ No newline at end of file diff --git a/addons/keda/outputs.tf b/addons/keda/outputs.tf new file mode 100644 index 0000000..a3832b1 --- /dev/null +++ b/addons/keda/outputs.tf @@ -0,0 +1,11 @@ +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = 
local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/keda/variables.tf b/addons/keda/variables.tf new file mode 100644 index 0000000..67b7657 --- /dev/null +++ b/addons/keda/variables.tf @@ -0,0 +1,12 @@ +variable "helm_config" { + description = "Helm provider config for Keda" + type = any + default = {} +} + +variable "keda_extra_configs" { + description = "Override attributes of helm_release terraform resource" + type = any + default = {} +} + diff --git a/addons/keda/version.tf b/addons/keda/version.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/addons/keda/version.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/addons/reloader/README.md b/addons/reloader/README.md new file mode 100644 index 0000000..d6c8df0 --- /dev/null +++ b/addons/reloader/README.md @@ -0,0 +1,39 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [google](#requirement\_google) | >= 5.10.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [helm\_addon](#module\_helm\_addon) | ../helm | n/a | + +## Resources + +No resources. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [helm\_config](#input\_helm\_config) | Helm provider config for Cluster Autoscaler | `any` | `{}` | no | +| [reloader\_extra\_configs](#input\_reloader\_extra\_configs) | Override attributes of helm\_release terraform resource | `any` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [chart\_version](#output\_chart\_version) | n/a | +| [namespace](#output\_namespace) | n/a | +| [repository](#output\_repository) | n/a | +| [service\_account](#output\_service\_account) | n/a | + \ No newline at end of file diff --git a/addons/reloader/config/reloader.yaml b/addons/reloader/config/reloader.yaml new file mode 100644 index 0000000..a2cf043 --- /dev/null +++ b/addons/reloader/config/reloader.yaml @@ -0,0 +1,288 @@ +# Generated from deployments/kubernetes/templates/chart/values.yaml.tmpl +global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + +kubernetes: + host: https://kubernetes.default + +nameOverride: "" +fullnameOverride: "" + +reloader: + autoReloadAll: false + isArgoRollouts: false + isOpenshift: false + ignoreSecrets: false + ignoreConfigMaps: false + reloadOnCreate: false + syncAfterRestart: false + reloadStrategy: default # Set to default, env-vars or annotations + ignoreNamespaces: "" # Comma separated list of namespaces to ignore + namespaceSelector: "" # Comma separated list of k8s label selectors for namespaces selection + resourceLabelSelector: "" # Comma separated list of k8s label selectors for configmap/secret selection + logFormat: "" #json + watchGlobally: true + # Set to true to enable leadership election allowing you to run multiple replicas + enableHA: false + # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem + 
readOnlyRootFileSystem: false + legacy: + rbac: false + matchLabels: {} + deployment: + # If you wish to run multiple replicas set reloader.enableHA = true + replicas: 1 + + revisionHistoryLimit: 2 + + nodeSelector: + # cloud.google.com/gke-nodepool: default-pool + + # An affinity stanza to be applied to the Deployment. + # Example: + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: "node-role.kubernetes.io/infra-worker" + # operator: "Exists" + affinity: {} + + securityContext: + runAsNonRoot: true + runAsUser: 65534 + + containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + + # A list of tolerations to be applied to the Deployment. + # Example: + # tolerations: + # - key: "node-role.kubernetes.io/infra-worker" + # operator: "Exists" + # effect: "NoSchedule" + tolerations: [] + + # Topology spread constraints for pod assignment + # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # Example: + # topologySpreadConstraints: + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: my-app + topologySpreadConstraints: [] + + annotations: {} + labels: + provider: stakater + group: com.stakater.platform + version: v1.0.63 + image: + name: ghcr.io/stakater/reloader + tag: v1.0.63 + pullPolicy: IfNotPresent + # Support for extra environment variables. + env: + # Open supports Key value pair as environment variables. + open: + # secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any. 
+ secret: + # ALERT_ON_RELOAD: <"true"|"false"> + # ALERT_SINK: <"slack"> # By default it will be a raw text based webhook + # ALERT_WEBHOOK_URL: <"webhook_url"> + # ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed"> + # field supports Key value pair as environment variables. It gets the values from other fields of pod. + field: + # existing secret, you can specify multiple existing secrets, for each + # specify the env var name followed by the key in existing secret that + # will be used to populate the env var + existing: + # existing_secret_name: + # ALERT_ON_RELOAD: alert_on_reload_key + # ALERT_SINK: alert_sink_key + # ALERT_WEBHOOK_URL: alert_webhook_key + # ALERT_ADDITIONAL_INFO: alert_additional_info_key + + # Liveness and readiness probe timeout values. + livenessProbe: {} + # timeoutSeconds: 5 + # failureThreshold: 5 + # periodSeconds: 10 + # successThreshold: 1 + readinessProbe: {} + # timeoutSeconds: 15 + # failureThreshold: 5 + # periodSeconds: 10 + # successThreshold: 1 + + # Specify resource requests/limits for the deployment. + # Example: + # resources: + # limits: + # cpu: "100m" + # memory: "512Mi" + # requests: + # cpu: "10m" + # memory: "128Mi" + resources: {} + pod: + annotations: {} + priorityClassName: "" + # imagePullSecrets: + # - name: myregistrykey + + service: {} + # labels: {} + # annotations: {} + # port: 9090 + + rbac: + enabled: true + labels: {} + # Service account config for the agent pods + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + labels: {} + annotations: {} + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + # Optional flags to pass to the Reloader entrypoint + # Example: + # custom_annotations: + # configmap: "my.company.com/configmap" + # secret: "my.company.com/secret" + custom_annotations: {} + + serviceMonitor: + # Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor + # Enabling this requires service to be enabled as well, or no endpoints will be found + enabled: false + # Set the namespace the ServiceMonitor should be deployed + # namespace: monitoring + + # Fallback to the prometheus default unless specified + # interval: 10s + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + # scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + # tlsConfig: {} + + # bearerTokenFile: + # Fallback to the prometheus default unless specified + # timeout: 30s + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + labels: {} + + ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + annotations: {} + + # Retain the job and instance labels of the metrics pushed to the Pushgateway + # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape) + honorLabels: true + + ## Metric relabel configs to apply to samples before ingestion. 
+ ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Relabel configs to apply to samples before ingestion. + ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + targetLabels: [] + + podMonitor: + enabled: false + # Set the namespace the podMonitor should be deployed + # namespace: monitoring + + # Fallback to the prometheus default unless specified + # interval: 10s + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + # scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + # tlsConfig: {} + + # bearerTokenSecret: + # Fallback to the prometheus default unless specified + # timeout: 30s + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + labels: {} + + ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + annotations: {} + + # Retain the job and instance labels of the metrics pushed to the Pushgateway + # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape) + honorLabels: true + + ## Metric relabel configs to apply to samples before ingestion. + ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Relabel configs to apply to samples before ingestion. 
+ ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + podTargetLabels: [] + + podDisruptionBudget: + enabled: false + # Set the minimum available replicas + # minAvailable: 1 + + netpol: + enabled: false + from: [] + # - podSelector: + # matchLabels: + # app.kubernetes.io/name: prometheus + to: [] + + volumeMounts: [] + + volumes: [] + + webhookUrl: "" diff --git a/addons/reloader/locals.tf b/addons/reloader/locals.tf new file mode 100644 index 0000000..62f42db --- /dev/null +++ b/addons/reloader/locals.tf @@ -0,0 +1,41 @@ +locals { + name = "reloader" + + default_helm_config = { + name = try(var.reloader_extra_configs.name, local.name) + chart = try(var.reloader_extra_configs.chart, local.name) + repository = try(var.reloader_extra_configs.repository, "https://stakater.github.io/stakater-charts") + version = try(var.reloader_extra_configs.version, "1.0.41") + namespace = try(var.reloader_extra_configs.namespace, "kube-system") + create_namespace = try(var.reloader_extra_configs.create_namespace, true) + description = "Reloader helm Chart deployment configuration" + timeout = try(var.reloader_extra_configs.timeout, "600") + lint = try(var.reloader_extra_configs.lint, "false") + repository_key_file = try(var.reloader_extra_configs.repository_key_file, "") + repository_cert_file = try(var.reloader_extra_configs.repository_cert_file, "") + repository_username = try(var.reloader_extra_configs.repository_username, "") + repository_password = try(var.reloader_extra_configs.repository_password, "") + verify = try(var.reloader_extra_configs.verify, "false") + keyring = try(var.reloader_extra_configs.keyring, "") + disable_webhooks = try(var.reloader_extra_configs.disable_webhooks, "false") + reuse_values = 
try(var.reloader_extra_configs.reuse_values, "false") + reset_values = try(var.reloader_extra_configs.reset_values, "false") + force_update = try(var.reloader_extra_configs.force_update, "false") + recreate_pods = try(var.reloader_extra_configs.recreate_pods, "false") + cleanup_on_fail = try(var.reloader_extra_configs.cleanup_on_fail, "false") + max_history = try(var.reloader_extra_configs.max_history, "0") + atomic = try(var.reloader_extra_configs.atomic, "false") + skip_crds = try(var.reloader_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.reloader_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.reloader_extra_configs.disable_openapi_validation, "false") + wait = try(var.reloader_extra_configs.wait, "true") + wait_for_jobs = try(var.reloader_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.reloader_extra_configs.dependency_update, "false") + replace = try(var.reloader_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config + ) +} \ No newline at end of file diff --git a/addons/reloader/main.tf b/addons/reloader/main.tf new file mode 100644 index 0000000..83b8786 --- /dev/null +++ b/addons/reloader/main.tf @@ -0,0 +1,4 @@ +module "helm_addon" { + source = "../helm" + helm_config = local.helm_config +} \ No newline at end of file diff --git a/addons/reloader/output.tf b/addons/reloader/output.tf new file mode 100644 index 0000000..76afe98 --- /dev/null +++ b/addons/reloader/output.tf @@ -0,0 +1,15 @@ +output "service_account" { + value = "${local.name}-sa" +} + +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/reloader/variables.tf b/addons/reloader/variables.tf new file mode 100644 index 0000000..8da1cf2 --- /dev/null +++ 
b/addons/reloader/variables.tf @@ -0,0 +1,13 @@ +variable "helm_config" { + description = "Helm provider config for Cluster Autoscaler" + type = any + default = {} +} + +variable "reloader_extra_configs" { + description = "Override attributes of helm_release terraform resource" + type = any + default = {} +} + + diff --git a/addons/reloader/version.tf b/addons/reloader/version.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/addons/reloader/version.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/data.tf b/data.tf new file mode 100644 index 0000000..1a179eb --- /dev/null +++ b/data.tf @@ -0,0 +1,4 @@ +data "google_container_cluster" "my_cluster" { + name = var.gke_cluster_name + location = var.region +} \ No newline at end of file diff --git a/locals.tf b/locals.tf new file mode 100644 index 0000000..44e4f00 --- /dev/null +++ b/locals.tf @@ -0,0 +1 @@ +locals {} \ No newline at end of file diff --git a/main.tf b/main.tf index 6b53c8f..9e60433 100644 --- a/main.tf +++ b/main.tf @@ -1,6 +1,37 @@ -# ------------------------------------------------------------------------------ -# Resources -# ------------------------------------------------------------------------------ -locals { - label_order = var.label_order +module "cluster_autoscaler" { + source = "./addons/cluster-autoscaler" + count = var.cluster_autoscaler ? 1 : 0 + project_id = var.project_id + gke_cluster_name = data.google_container_cluster.my_cluster.name + cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs + helm_config = var.cluster_autoscaler_helm_config != null ? 
var.cluster_autoscaler_helm_config : { values = [local_file.cluster_autoscaler_helm_config[count.index].content] } +} + +module "reloader" { + source = "./addons/reloader" + count = var.reloader ? 1 : 0 + reloader_extra_configs = var.reloader_extra_configs + helm_config = var.reloader_helm_config != null ? var.reloader_helm_config : { values = [local_file.reloader_helm_config[count.index].content] } +} + +module "ingress_nginx" { + source = "./addons/ingress-nginx" + count = var.ingress_nginx ? 1 : 0 + ingress_nginx_extra_configs = var.ingress_nginx_extra_configs + helm_config = var.ingress_nginx_helm_config != null ? var.ingress_nginx_helm_config : { values = [local_file.ingress_nginx_helm_config[count.index].content] } +} + + +module "certification_manager" { + source = "./addons/cert-manager" + count = var.certification_manager ? 1 : 0 + certification_manager_extra_configs = var.certification_manager_extra_configs + helm_config = var.certification_manager_helm_config != null ? var.certification_manager_helm_config : { values = [local_file.certification_manager_helm_config[count.index].content] } +} + +module "keda" { + source = "./addons/keda" + count = var.keda ? 1 : 0 + keda_extra_configs = var.keda_extra_configs + helm_config = var.keda_helm_config != null ? 
var.keda_helm_config : { values = [local_file.keda_helm_config[count.index].content] } } diff --git a/modules/workload_identity/main.tf b/modules/workload_identity/main.tf new file mode 100644 index 0000000..5500885 --- /dev/null +++ b/modules/workload_identity/main.tf @@ -0,0 +1,11 @@ +resource "google_service_account" "gke" { + project = var.project_id + account_id = format("%s", var.GCP_GSA_NAME) + display_name = "Service Account for Keda Scaler" +} + +resource "google_service_account_iam_member" "pod_identity" { + role = "roles/iam.workloadIdentityUser" + member = "serviceAccount:${var.project_id}.svc.id.goog[${var.namespace}/${var.GCP_KSA_NAME}]" + service_account_id = google_service_account.gke.name +} \ No newline at end of file diff --git a/modules/workload_identity/variables.tf b/modules/workload_identity/variables.tf new file mode 100644 index 0000000..f4e4c85 --- /dev/null +++ b/modules/workload_identity/variables.tf @@ -0,0 +1,19 @@ +variable "project_id" { + description = "GCP project ID" + type = string +} + +variable "GCP_GSA_NAME" { + description = "Google Cloud Service Account name" + type = string +} + +variable "GCP_KSA_NAME" { + description = "Google Kubernetes Service Account name" + type = string +} + +variable "namespace" { + description = "namespace for addons" + type = string +} \ No newline at end of file diff --git a/modules/workload_identity/versions.tf b/modules/workload_identity/versions.tf new file mode 100644 index 0000000..953af1f --- /dev/null +++ b/modules/workload_identity/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + google = { + source = "hashicorp/google" + version = ">= 5.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} \ No newline at end of file diff --git a/outputs.tf b/outputs.tf index ff3d337..113c3ee 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,7 +1,69 @@ -# 
------------------------------------------------------------------------------ -# Outputs -# ------------------------------------------------------------------------------ -output "label_order" { - value = local.label_order - description = "Label order." +#----------- CLUSTER AUTOSCALER ---------------- +output "cluster_autoscaler_namespace" { + value = module.cluster_autoscaler[*].namespace + description = "Namespace where cluster-autoscaler is installed" +} +output "cluster_autoscaler_chart_version" { + value = module.cluster_autoscaler[*].chart_version + description = "chart version used for cluster-autoscaler helmchart" +} +output "cluster_autoscaler_repository" { + value = module.cluster_autoscaler[*].repository + description = "helm repository url of cluster-autoscaler" +} + +#----------- CERTICATION-MANAGER ------------------------ +output "certification_manager_namespace" { + value = module.certification_manager[*].namespace + description = "The namespace where certification-manager is deployed." +} +output "certification_manager_chart_version" { + value = module.certification_manager[*].chart_version + description = "Chart version of the certification-manager Helm Chart." +} +output "certification_manager_repository" { + value = module.certification_manager[*].repository + description = "Helm chart repository of the certification-manager." +} + +#----------- RELOADER ---------------------------------- +output "reloader_namespace" { + value = module.reloader[*].namespace + description = "The namespace where reloader is deployed." +} +output "reloader_chart_version" { + value = module.reloader[*].chart_version + description = "Chart version of the reloader Helm Chart." +} +output "reloader_repository" { + value = module.reloader[*].repository + description = "Helm chart repository of the reloader." 
+} + +#----------- INGRESS NGINX --------------------- +output "ingress-nginx_namespace" { + value = module.ingress_nginx[*].namespace + description = "Namespace where ingress-nginx is installed" +} +output "ingress-nginx_chart_version" { + value = module.ingress_nginx[*].chart_version + description = "chart version used for ingress-nginx helmchart" +} +output "ingress-nginx_repository" { + value = module.ingress_nginx[*].repository + description = "helm repository url of ingress-nginx" +} + +#----------- KEDA --------------------- +output "keda_namespace" { + value = module.keda[*].namespace + description = "Namespace where keda is installed" +} +output "keda_chart_version" { + value = module.keda[*].chart_version + description = "chart version used for keda helmchart" +} +output "keda_repository" { + value = module.keda[*].repository + description = "helm repository url of keda" } diff --git a/override_values.tf b/override_values.tf new file mode 100644 index 0000000..fb24f32 --- /dev/null +++ b/override_values.tf @@ -0,0 +1,165 @@ +#----------------------------- CLUSTER AUTOSCALER ---------------------------- +resource "local_file" "cluster_autoscaler_helm_config" { + count = var.cluster_autoscaler && (var.cluster_autoscaler_helm_config == null) ? 1 : 0 + content = <