diff --git a/main/aws-partner-addons/index.html b/main/aws-partner-addons/index.html index 924dc004..e598de4a 100644 --- a/main/aws-partner-addons/index.html +++ b/main/aws-partner-addons/index.html @@ -801,6 +801,10 @@
Terraform module to deploy Kubernetes addons on Amazon EKS clusters.
"},{"location":"#usage","title":"Usage","text":"module \"eks\" {\nsource = \"terraform-aws-modules/eks/aws\"\ncluster_name = \"my-cluster\"\ncluster_version = \"1.27\"\n... truncated for brevity\n}\nmodule \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\nversion = \"~> 1.0\" #ensure to update this to the latest/desired version\ncluster_name = module.eks.cluster_name\ncluster_endpoint = module.eks.cluster_endpoint\ncluster_version = module.eks.cluster_version\noidc_provider_arn = module.eks.oidc_provider_arn\neks_addons = {\naws-ebs-csi-driver = {\nmost_recent = true\n}\ncoredns = {\nmost_recent = true\n}\nvpc-cni = {\nmost_recent = true\n}\nkube-proxy = {\nmost_recent = true\n}\n}\nenable_aws_load_balancer_controller = true\nenable_cluster_proportional_autoscaler = true\nenable_karpenter = true\nenable_kube_prometheus_stack = true\nenable_metrics_server = true\nenable_external_dns = true\nenable_cert_manager = true\ncert_manager_route53_hosted_zone_arns = [\"arn:aws:route53:::hostedzone/XXXXXXXXXXXXX\"]\ntags = {\nEnvironment = \"dev\"\n}\n}\n
"},{"location":"#requirements","title":"Requirements","text":"Name Version terraform >= 1.0 aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#providers","title":"Providers","text":"Name Version aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#modules","title":"Modules","text":"Name Source Version argo_events aws-ia/eks-blueprints-addon/aws 1.1.0 argo_rollouts aws-ia/eks-blueprints-addon/aws 1.1.0 argo_workflows aws-ia/eks-blueprints-addon/aws 1.1.0 argocd aws-ia/eks-blueprints-addon/aws 1.1.0 aws_cloudwatch_metrics aws-ia/eks-blueprints-addon/aws 1.1.0 aws_efs_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 aws_for_fluentbit aws-ia/eks-blueprints-addon/aws 1.1.0 aws_fsx_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 aws_gateway_api_controller aws-ia/eks-blueprints-addon/aws 1.1.0 aws_load_balancer_controller aws-ia/eks-blueprints-addon/aws 1.1.0 aws_node_termination_handler aws-ia/eks-blueprints-addon/aws 1.1.0 aws_node_termination_handler_sqs terraform-aws-modules/sqs/aws 4.0.1 aws_privateca_issuer aws-ia/eks-blueprints-addon/aws 1.1.0 cert_manager aws-ia/eks-blueprints-addon/aws 1.1.0 cluster_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.0 cluster_proportional_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.0 external_dns aws-ia/eks-blueprints-addon/aws 1.1.0 external_secrets aws-ia/eks-blueprints-addon/aws 1.1.0 gatekeeper aws-ia/eks-blueprints-addon/aws 1.1.0 ingress_nginx aws-ia/eks-blueprints-addon/aws 1.1.0 karpenter aws-ia/eks-blueprints-addon/aws 1.1.0 karpenter_sqs terraform-aws-modules/sqs/aws 4.0.1 kube_prometheus_stack aws-ia/eks-blueprints-addon/aws 1.1.0 metrics_server aws-ia/eks-blueprints-addon/aws 1.1.0 secrets_store_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 secrets_store_csi_driver_provider_aws aws-ia/eks-blueprints-addon/aws 1.1.0 velero aws-ia/eks-blueprints-addon/aws 1.1.0 vpa aws-ia/eks-blueprints-addon/aws 1.1.0"},{"location":"#resources","title":"Resources","text":"Name Type aws_autoscaling_group_tag.aws_node_termination_handler resource aws_autoscaling_lifecycle_hook.aws_node_termination_handler resource aws_cloudwatch_event_rule.aws_node_termination_handler resource aws_cloudwatch_event_rule.karpenter resource aws_cloudwatch_event_target.aws_node_termination_handler resource aws_cloudwatch_event_target.karpenter resource aws_cloudwatch_log_group.aws_for_fluentbit resource aws_cloudwatch_log_group.fargate_fluentbit resource aws_eks_addon.this resource aws_iam_instance_profile.karpenter resource aws_iam_policy.fargate_fluentbit resource aws_iam_role.karpenter resource aws_iam_role_policy_attachment.additional resource aws_iam_role_policy_attachment.karpenter resource helm_release.this resource kubernetes_config_map_v1.aws_logging resource kubernetes_namespace_v1.aws_observability resource time_sleep.this resource aws_caller_identity.current data source aws_eks_addon_version.this data source aws_iam_policy_document.aws_efs_csi_driver data source aws_iam_policy_document.aws_for_fluentbit data source aws_iam_policy_document.aws_fsx_csi_driver data source aws_iam_policy_document.aws_gateway_api_controller data source aws_iam_policy_document.aws_load_balancer_controller data source aws_iam_policy_document.aws_node_termination_handler data source aws_iam_policy_document.aws_privateca_issuer data source aws_iam_policy_document.cert_manager data source aws_iam_policy_document.cluster_autoscaler data source aws_iam_policy_document.external_dns data source aws_iam_policy_document.external_secrets data source 
aws_iam_policy_document.fargate_fluentbit data source aws_iam_policy_document.karpenter data source aws_iam_policy_document.karpenter_assume_role data source aws_iam_policy_document.velero data source aws_partition.current data source aws_region.current data source"},{"location":"#inputs","title":"Inputs","text":"Name Description Type Default Required argo_events Argo Events add-on configuration values any
{}
no argo_rollouts Argo Rollouts add-on configuration values any
{}
no argo_workflows Argo Workflows add-on configuration values any
{}
no argocd ArgoCD add-on configuration values any
{}
no aws_cloudwatch_metrics Cloudwatch Metrics add-on configuration values any
{}
no aws_efs_csi_driver EFS CSI Driver add-on configuration values any
{}
no aws_for_fluentbit AWS Fluentbit add-on configurations any
{}
no aws_for_fluentbit_cw_log_group AWS Fluentbit CloudWatch Log Group configurations any
{}
no aws_fsx_csi_driver FSX CSI Driver add-on configuration values any
{}
no aws_gateway_api_controller AWS Gateway API Controller add-on configuration values any
{}
no aws_load_balancer_controller AWS Load Balancer Controller add-on configuration values any
{}
no aws_node_termination_handler AWS Node Termination Handler add-on configuration values any
{}
no aws_node_termination_handler_asg_arns List of Auto Scaling group ARNs that AWS Node Termination Handler will monitor for EC2 events list(string)
[]
no aws_node_termination_handler_sqs AWS Node Termination Handler SQS queue configuration values any
{}
no aws_privateca_issuer AWS PCA Issuer add-on configurations any
{}
no cert_manager cert-manager add-on configuration values any
{}
no cert_manager_route53_hosted_zone_arns List of Route53 Hosted Zone ARNs that are used by cert-manager to create DNS records list(string)
[ \"arn:aws:route53:::hostedzone/*\"]no cluster_autoscaler Cluster Autoscaler add-on configuration values
any
{}
no cluster_endpoint Endpoint for your Kubernetes API server string
n/a yes cluster_name Name of the EKS cluster string
n/a yes cluster_proportional_autoscaler Cluster Proportional Autoscaler add-on configurations any
{}
no cluster_version Kubernetes <major>.<minor>
version to use for the EKS cluster (i.e.: 1.24
) string
n/a yes create_delay_dependencies Dependency attribute which must be resolved before starting the create_delay_duration
list(string)
[]
no create_delay_duration The duration to wait before creating resources string
\"30s\"
no eks_addons Map of EKS add-on configurations to enable for the cluster. Add-on name can be the map keys or set with name
any
{}
no eks_addons_timeouts Create, update, and delete timeout configurations for the EKS add-ons map(string)
{}
no enable_argo_events Enable Argo Events add-on bool
false
no enable_argo_rollouts Enable Argo Rollouts add-on bool
false
no enable_argo_workflows Enable Argo workflows add-on bool
false
no enable_argocd Enable Argo CD Kubernetes add-on bool
false
no enable_aws_cloudwatch_metrics Enable AWS Cloudwatch Metrics add-on for Container Insights bool
false
no enable_aws_efs_csi_driver Enable AWS EFS CSI Driver add-on bool
false
no enable_aws_for_fluentbit Enable AWS for FluentBit add-on bool
false
no enable_aws_fsx_csi_driver Enable AWS FSX CSI Driver add-on bool
false
no enable_aws_gateway_api_controller Enable AWS Gateway API Controller add-on bool
false
no enable_aws_load_balancer_controller Enable AWS Load Balancer Controller add-on bool
false
no enable_aws_node_termination_handler Enable AWS Node Termination Handler add-on bool
false
no enable_aws_privateca_issuer Enable AWS PCA Issuer bool
false
no enable_cert_manager Enable cert-manager add-on bool
false
no enable_cluster_autoscaler Enable Cluster autoscaler add-on bool
false
no enable_cluster_proportional_autoscaler Enable Cluster Proportional Autoscaler bool
false
no enable_external_dns Enable external-dns operator add-on bool
false
no enable_external_secrets Enable External Secrets operator add-on bool
false
no enable_fargate_fluentbit Enable Fargate FluentBit add-on bool
false
no enable_gatekeeper Enable Gatekeeper add-on bool
false
no enable_ingress_nginx Enable Ingress Nginx bool
false
no enable_karpenter Enable Karpenter controller add-on bool
false
no enable_kube_prometheus_stack Enable Kube Prometheus Stack bool
false
no enable_metrics_server Enable metrics server add-on bool
false
no enable_secrets_store_csi_driver Enable CSI Secrets Store Provider bool
false
no enable_secrets_store_csi_driver_provider_aws Enable AWS CSI Secrets Store Provider bool
false
no enable_velero Enable Velero add-on bool
false
no enable_vpa Enable Vertical Pod Autoscaler add-on bool
false
no external_dns external-dns add-on configuration values any
{}
no external_dns_route53_zone_arns List of Route53 zones ARNs which external-dns will have access to create/manage records (if using Route53) list(string)
[]
no external_secrets External Secrets add-on configuration values any
{}
no external_secrets_kms_key_arns List of KMS Key ARNs that are used by Secrets Manager that contain secrets to mount using External Secrets list(string)
[ \"arn:aws:kms:::key/*\"]no external_secrets_secrets_manager_arns List of Secrets Manager ARNs that contain secrets to mount using External Secrets
list(string)
[ \"arn:aws:secretsmanager:::secret:*\"]no external_secrets_ssm_parameter_arns List of Systems Manager Parameter ARNs that contain secrets to mount using External Secrets
list(string)
[ \"arn:aws:ssm:::parameter/*\"]no fargate_fluentbit Fargate fluentbit add-on config
any
{}
no fargate_fluentbit_cw_log_group AWS Fargate Fluentbit CloudWatch Log Group configurations any
{}
no gatekeeper Gatekeeper add-on configuration any
{}
no helm_releases A map of Helm releases to create. This provides the ability to pass in an arbitrary map of Helm chart definitions to create any
{}
no ingress_nginx Ingress Nginx add-on configurations any
{}
no karpenter Karpenter add-on configuration values any
{}
no karpenter_enable_spot_termination Determines whether to enable native node termination handling bool
true
no karpenter_node Karpenter IAM role and IAM instance profile configuration values any
{}
no karpenter_sqs Karpenter SQS queue for native node termination handling configuration values any
{}
no kube_prometheus_stack Kube Prometheus Stack add-on configurations any
{}
no metrics_server Metrics Server add-on configurations any
{}
no oidc_provider_arn The ARN of the cluster OIDC Provider string
n/a yes secrets_store_csi_driver CSI Secrets Store Provider add-on configurations any
{}
no secrets_store_csi_driver_provider_aws CSI Secrets Store Provider add-on configurations any
{}
no tags A map of tags to add to all resources map(string)
{}
no velero Velero add-on configuration values any
{}
no vpa Vertical Pod Autoscaler add-on configuration values any
{}
no"},{"location":"#outputs","title":"Outputs","text":"Name Description argo_events Map of attributes of the Helm release created argo_rollouts Map of attributes of the Helm release created argo_workflows Map of attributes of the Helm release created argocd Map of attributes of the Helm release created aws_cloudwatch_metrics Map of attributes of the Helm release and IRSA created aws_efs_csi_driver Map of attributes of the Helm release and IRSA created aws_for_fluentbit Map of attributes of the Helm release and IRSA created aws_fsx_csi_driver Map of attributes of the Helm release and IRSA created aws_gateway_api_controller Map of attributes of the Helm release and IRSA created aws_load_balancer_controller Map of attributes of the Helm release and IRSA created aws_node_termination_handler Map of attributes of the Helm release and IRSA created aws_privateca_issuer Map of attributes of the Helm release and IRSA created cert_manager Map of attributes of the Helm release and IRSA created cluster_autoscaler Map of attributes of the Helm release and IRSA created cluster_proportional_autoscaler Map of attributes of the Helm release and IRSA created eks_addons Map of attributes for each EKS addons enabled external_dns Map of attributes of the Helm release and IRSA created external_secrets Map of attributes of the Helm release and IRSA created fargate_fluentbit Map of attributes of the configmap and IAM policy created gatekeeper Map of attributes of the Helm release and IRSA created helm_releases Map of attributes of the Helm release created ingress_nginx Map of attributes of the Helm release and IRSA created karpenter Map of attributes of the Helm release and IRSA created kube_prometheus_stack Map of attributes of the Helm release and IRSA created metrics_server Map of attributes of the Helm release and IRSA created secrets_store_csi_driver Map of attributes of the Helm release and IRSA created secrets_store_csi_driver_provider_aws Map of attributes of the Helm release and IRSA created velero Map of attributes of the Helm release and IRSA created vpa Map of attributes of the Helm release and IRSA created"},{"location":"amazon-eks-addons/","title":"Amazon EKS Add-ons","text":"The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API; either native EKS addons or third party add-ons supplied via the AWS Marketplace.
See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as Additional Amazon EKS add-ons from independent software vendors.
"},{"location":"amazon-eks-addons/#architecture-support","title":"Architecture Support","text":"The Amazon EKS provided add-ons listed below support both x86_64/amd64
and arm64
architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64
and arm64
architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.
vpc-cni
\u2705 \u2705 aws-ebs-csi-driver
\u2705 \u2705 coredns
\u2705 \u2705 kube-proxy
\u2705 \u2705 adot
\u2705 \u2705 aws-guardduty-agent
\u2705 \u2705"},{"location":"amazon-eks-addons/#usage","title":"Usage","text":"The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons
argument which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:
module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\n<key> = {\nname = string # Optional - <key> is used if `name` is not set\nmost_recent = bool\naddon_version = string # overrides `most_recent` if set\nconfiguration_values = string # JSON string\npreserve = bool # defaults to `true`\nresolve_conflicts_on_create = string # defaults to `OVERWRITE`\nresolve_conflicts_on_update = string # defaults to `OVERWRITE`\ntimeouts = {\ncreate = string # optional\nupdate = string # optional\ndelete = string # optional\n}\ntags = map(string)\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#example","title":"Example","text":"module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\n # Amazon EKS add-ons\naws-ebs-csi-driver = {\nmost_recent = true\nservice_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn\n}\ncoredns = {\nmost_recent = true\ntimeouts = {\ncreate = \"25m\"\ndelete = \"10m\"\n}\n}\nvpc-cni = {\nmost_recent = true\nservice_account_role_arn = module.vpc_cni_irsa.iam_role_arn\n}\nkube-proxy = {}\n # Third party add-ons via AWS Marketplace\nkubecost_kubecost = {\nmost_recent = true\n}\nteleport_teleport = {\nmost_recent = true\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#configuration-values","title":"Configuration Values","text":"You can supply custom configuration values to each addon via the configuration_values
argument of the add-on definition. The value provided must be a JSON encoded string and adhere to the JSON scheme provided by the version of the add-on. You can view this schema using the awscli by supplying the add-on name and version to the describe-addon-configuration
command:
aws eks describe-addon-configuration \\\n--addon-name coredns \\\n--addon-version v1.8.7-eksbuild.2 \\\n--query 'configurationSchema' \\\n--output text | jq\n
Which returns the formatted JSON schema like below:
{\n\"$ref\": \"#/definitions/Coredns\",\n\"$schema\": \"http://json-schema.org/draft-06/schema#\",\n\"definitions\": {\n\"Coredns\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"computeType\": {\n\"type\": \"string\"\n},\n\"corefile\": {\n\"description\": \"Entire corefile contents to use with installation\",\n\"type\": \"string\"\n},\n\"nodeSelector\": {\n\"additionalProperties\": {\n\"type\": \"string\"\n},\n\"type\": \"object\"\n},\n\"replicaCount\": {\n\"type\": \"integer\"\n},\n\"resources\": {\n\"$ref\": \"#/definitions/Resources\"\n}\n},\n\"title\": \"Coredns\",\n\"type\": \"object\"\n},\n\"Limits\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"cpu\": {\n\"type\": \"string\"\n},\n\"memory\": {\n\"type\": \"string\"\n}\n},\n\"title\": \"Limits\",\n\"type\": \"object\"\n},\n\"Resources\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"limits\": {\n\"$ref\": \"#/definitions/Limits\"\n},\n\"requests\": {\n\"$ref\": \"#/definitions/Limits\"\n}\n},\n\"title\": \"Resources\",\n\"type\": \"object\"\n}\n}\n}\n
You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode()
function as shown below:
module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\ncoredns = {\nmost_recent = true\nconfiguration_values = jsonencode({\nreplicaCount = 4\nresources = {\nlimits = {\ncpu = \"100m\"\nmemory = \"150Mi\"\n}\nrequests = {\ncpu = \"100m\"\nmemory = \"150Mi\"\n}\n}\n})\n}\n}\n}\n
"},{"location":"architectures/","title":"Architectures","text":""},{"location":"architectures/#addons","title":"Addons","text":"Addon x86_64/amd64 arm64 Argo Rollouts \u2705 \u2705 Argo Workflows \u2705 \u2705 Argo CD \u2705 \u2705 AWS CloudWatch Metrics \u2705 \u2705 AWS EFS CSI Driver \u2705 \u2705 AWS for FluentBit \u2705 \u2705 AWS FSx CSI Driver \u2705 \u2705 AWS Load Balancer Controller \u2705 \u2705 AWS Node Termination Handler \u2705 \u2705 AWS Private CA Issuer \u2705 \u2705 Cert Manager \u2705 \u2705 Cluster Autoscaler \u2705 \u2705 Cluster Proportional Autoscaler \u2705 \u2705 External DNS \u2705 \u2705 External Secrets \u2705 \u2705 OPA Gatekeeper \u2705 \u2705 Ingress Nginx \u2705 \u2705 Karpenter \u2705 \u2705 Kube-Prometheus Stack \u2705 \u2705 Metrics Server \u2705 \u2705 Secrets Store CSI Driver \u2705 \u2705 Secrets Store CSI Driver Provider AWS \u2705 \u2705 Velero \u2705 \u2705 Vertical Pod Autoscaler \u2705 \u2705"},{"location":"architectures/#amazon-eks-addons","title":"Amazon EKS Addons","text":"The Amazon EKS provided add-ons listed below support both x86_64/amd64
and arm64
architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64
and arm64
architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These addons are specified via the eks_addons
input variable.
The following addons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information on the addon and its supported configuration values, as well as for questions, comments, and feature requests.
Addon Description Ondat Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. Hashicorp - Consul Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. Hashicorp - Vault Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. Sysdig Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. Tetrate Istio Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms."},{"location":"helm-release/","title":"Helm Release Add-ons","text":"Starting with EKS Blueprints v5, we have made a decision to support the provisioning of only a core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically, you can expect that any AWS-created add-on that is not yet available via the Amazon EKS add-ons will be prioritized to be provisioned through this repository.
In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g. metrics-server) and certain patterns (gitops) that are foundational elements for a complete blueprint of an EKS cluster.
One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they often want to provision add-ons that are not necessarily supported by EKS Blueprints. For such add-ons, the options are as follows:
"},{"location":"helm-release/#with-helm_release-terraform-resource","title":"Withhelm_release
Terraform Resource","text":"The helm_release resource is the most fundamental way to provision a helm chart via Terraform.
Use this resource if you need to control the lifecycle of add-ons down to the level of each individual add-on resource.
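As a reference, a minimal sketch of managing a chart directly with the helm_release resource is shown below; the chart, repository, version, and values are illustrative placeholders rather than anything shipped by this module.
resource \"helm_release\" \"metrics_server\" {\n # Illustrative chart coordinates - substitute the chart you need\nname = \"metrics-server\"\nrepository = \"https://kubernetes-sigs.github.io/metrics-server/\"\nchart = \"metrics-server\"\nversion = \"3.11.0\"\nnamespace = \"kube-system\"\n\n # Chart values can be set inline or via `values`\nset {\nname = \"replicas\"\nvalue = \"2\"\n}\n}\n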
"},{"location":"helm-release/#with-helm_releases-variable","title":"Withhelm_releases
Variable","text":"You can use the helm_releases
variable in EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we just iterate through the provided map and pass each configuration to the Terraform helm_release resource.
E.g.
module \"addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\nversion = \"~> 1.0\"\ncluster_name = \"<cluster_name>\"\ncluster_endpoint = \"<cluster_endpoint>\"\ncluster_version = \"<cluster_version>\"\noidc_provider_arn = \"<oidc_provider_arn>\"\n # EKS add-ons\neks_addons = {\ncoredns = {}\nvpc-cni = {}\nkube-proxy = {}\n}\n # Blueprints add-ons\nenable_aws_efs_csi_driver = true\nenable_aws_cloudwatch_metrics = true\nenable_cert_manager = true\n...\n # Pass in any number of Helm charts to be created for those that are not natively supported\nhelm_releases = {\nprometheus-adapter = {\ndescription = \"A Helm chart for k8s prometheus adapter\"\nnamespace = \"prometheus-adapter\"\ncreate_namespace = true\nchart = \"prometheus-adapter\"\nchart_version = \"4.2.0\"\nrepository = \"https://prometheus-community.github.io/helm-charts\"\nvalues = [\n<<-EOT\n replicas: 2\n podDisruptionBudget:\n enabled: true\n EOT\n]\n}\ngpu-operator = {\ndescription = \"A Helm chart for NVIDIA GPU operator\"\nnamespace = \"gpu-operator\"\ncreate_namespace = true\nchart = \"gpu-operator\"\nchart_version = \"v23.3.2\"\nrepository = \"https://nvidia.github.io/gpu-operator\"\nvalues = [\n<<-EOT\n operator:\n defaultRuntime: containerd\n EOT\n]\n}\n}\ntags = local.tags\n}\n
With this pattern, the lifecycle of all your add-ons is tied to that of the addons
module. This allows you to easily target the addon module in your Terraform apply and destroy commands. E.g.
terraform apply -target=module.addons\n\nterraform destroy -target=module.addons\n
"},{"location":"helm-release/#with-eks-blueprints-addon-module","title":"With EKS Blueprints Addon Module","text":"If you have an add-on that requires an IAM Role for Service Account (IRSA), we have created a new Terraform module terraform-aws-eks-blueprints-addon that can help provision a Helm chart along with an IAM role and policies with permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.
You can optionally use this module for add-ons that do not need IRSA or even just to create the IAM resources for IRSA and skip the helm release. Detailed usage of how to consume this module can be found in its readme.
This pattern can be used to create a Terraform module that wraps a set of add-ons not supported by EKS Blueprints Add-ons today in a single module definition. An example of this is the ACK add-ons repository, which is a collection of ACK Helm chart deployments with IRSA for each of the ACK controllers.
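For illustration, a minimal sketch of consuming the terraform-aws-eks-blueprints-addon module directly is shown below; the chart coordinates and IRSA settings are placeholders, and the input names are based on the module's readme, so verify them against the module version you use.
module \"custom_addon\" {\nsource = \"aws-ia/eks-blueprints-addon/aws\"\nversion = \"~> 1.0\"\n\n # Helm chart settings (placeholder values)\nchart = \"<chart-name>\"\nchart_version = \"<chart-version>\"\nrepository = \"<helm-repository-url>\"\nnamespace = \"<namespace>\"\ncreate_namespace = true\n\n # Optional IRSA settings - omit these to install the chart without an IAM role\ncreate_role = true\nrole_name = \"<role-name>\"\nrole_policies = {\nadditional = \"<iam-policy-arn>\"\n}\noidc_providers = {\nthis = {\nprovider_arn = \"<oidc-provider-arn>\"\nservice_account = \"<service-account-name>\"\n}\n}\n}\n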
"},{"location":"addons/argo-events/","title":"Argo Events","text":"Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).
"},{"location":"addons/argo-events/#usage","title":"Usage","text":"Argo Events can be deployed by enabling the add-on via the following.
enable_argo_events = true\n
You can optionally customize the Helm chart that deploys Argo Events via the following configuration.
enable_argo_events = true\nargo_events = {\nname = \"argo-events\"\nchart_version = \"2.4.0\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-events\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-events pods are running.
$ kubectl get pods -n argo-events\nNAME READY STATUS RESTARTS AGE\nargo-events-controller-manager-bfb894cdb-k8hzn 1/1 Running 0 11m\n
"},{"location":"addons/argo-rollouts/","title":"Argo Rollouts","text":"Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.
"},{"location":"addons/argo-rollouts/#usage","title":"Usage","text":"Argo Rollouts can be deployed by enabling the add-on via the following.
enable_argo_rollouts = true\n
You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.
enable_argo_rollouts = true\nargo_rollouts = {\nname = \"argo-rollouts\"\nchart_version = \"2.22.3\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-rollouts\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-rollouts pods are running.
$ kubectl get pods -n argo-rollouts\nNAME READY STATUS RESTARTS AGE\nargo-rollouts-5db5688849-x89zb 0/1 Running 0 11s\n
"},{"location":"addons/argo-workflows/","title":"Argo Workflows","text":"Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
"},{"location":"addons/argo-workflows/#usage","title":"Usage","text":"Argo Workflows can be deployed by enabling the add-on via the following.
enable_argo_workflows = true\n
You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.
enable_argo_workflows = true\nargo_workflows = {\nname = \"argo-workflows\"\nchart_version = \"0.28.2\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-workflows\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-workflows pods are running.
$ kubectl get pods -n argo-workflows\nNAME READY STATUS RESTARTS AGE\nargo-workflows-server-68988cd864-22zhr 1/1 Running 0 6m32s\nargo-workflows-workflow-controller-7ff7b5658d-9q44f 1/1 Running 0 6m32s\n
"},{"location":"addons/argocd/","title":"Argo CD","text":"Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
"},{"location":"addons/argocd/#usage","title":"Usage","text":"Argo CD can be deployed by enabling the add-on via the following.
enable_argocd = true\n
You can optionally customize the Helm chart that deploys Argo CD via the following configuration.
enable_argocd = true\nargocd = {\nname = \"argocd\"\nchart_version = \"5.29.1\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argocd\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argocd pods are running.
$ kubectl get pods -n argocd\nNAME READY STATUS RESTARTS AGE\nargo-cd-argocd-application-controller-0 1/1 Running 0 146m\nargo-cd-argocd-applicationset-controller-678d85f77b-rmpcb 1/1 Running 0 146m\nargo-cd-argocd-dex-server-7b6c9b5969-zpqnl 1/1 Running 0 146m\nargo-cd-argocd-notifications-controller-6d489b99c9-j6fdw 1/1 Running 0 146m\nargo-cd-argocd-redis-59dd95f5b5-8fx74 1/1 Running 0 146m\nargo-cd-argocd-repo-server-7b9bd88c95-mh2fz 1/1 Running 0 146m\nargo-cd-argocd-server-6f9cfdd4d5-8mfpc 1/1 Running 0 146m\n
"},{"location":"addons/aws-cloudwatch-metrics/","title":"AWS CloudWatch Metrics","text":"Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.
Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.
"},{"location":"addons/aws-cloudwatch-metrics/#usage","title":"Usage","text":"aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.
enable_aws_cloudwatch_metrics = true\n
You can also customize the Helm chart that deploys aws-cloudwatch-metrics
via the following configuration:
enable_aws_cloudwatch_metrics = true\naws_cloudwatch_metrics_irsa_policies = [\"IAM Policies\"]\naws_cloudwatch_metrics = {\nrole_policies = [\"IAM Policies\"] # extra policies in addition of CloudWatchAgentServerPolicy\nname = \"aws-cloudwatch-metrics\"\nrepository = \"https://aws.github.io/eks-charts\"\nchart_version = \"0.0.9\"\nnamespace = \"amazon-cloudwatch\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here\n}\n
Verify aws-cloudwatch-metrics pods are running
$ kubectl get pods -n amazon-cloudwatch\n\nNAME READY STATUS RESTARTS AGE\naws-cloudwatch-metrics-2dt5h 1/1 Running 0 149m\n
"},{"location":"addons/aws-efs-csi-driver/","title":"AWS EFS CSI Driver","text":"This add-on deploys the AWS EFS CSI driver into an EKS cluster.
"},{"location":"addons/aws-efs-csi-driver/#usage","title":"Usage","text":"The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.
enable_aws_efs_csi_driver = true\n
You can optionally customize the Helm chart that deploys the driver via the following configuration.
enable_aws_efs_csi_driver = true\n # Optional aws_efs_csi_driver configuration\naws_efs_csi_driver = {\nrepository = \"https://kubernetes-sigs.github.io/aws-efs-csi-driver/\"\nchart_version = \"2.4.1\"\nrole_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n}\n
Once deployed, you will be able to see a number of supporting resources in the kube-system
namespace.
$ kubectl get deployment efs-csi-controller -n kube-system\n\nNAME READY UP-TO-DATE AVAILABLE AGE\nefs-csi-controller 2/2 2 2 4m29s\n
$ kubectl get daemonset efs-csi-node -n kube-system\n\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\nefs-csi-node 3 3 3 3 3 beta.kubernetes.io/os=linux 4m32s\n
"},{"location":"addons/aws-efs-csi-driver/#validate-efs-csi-driver","title":"Validate EFS CSI Driver","text":"Follow the static provisioning example described here to validate the CSI driver is working as expected.
"},{"location":"addons/aws-for-fluentbit/","title":"AWS for Fluent Bit","text":"AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.
"},{"location":"addons/aws-for-fluentbit/#usage","title":"Usage","text":"AWS for Fluent Bit can be deployed by enabling the add-on via the following.
enable_aws_for_fluentbit = true\n
You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.
enable_aws_for_fluentbit = true\naws_for_fluentbit_cw_log_group = {\ncreate = true\nuse_name_prefix = true # Set this to true to enable name prefix\nname_prefix = \"eks-cluster-logs-\"\nretention = 7\n}\naws_for_fluentbit = {\nname = \"aws-for-fluent-bit\"\nchart_version = \"0.1.24\"\nrepository = \"https://aws.github.io/eks-charts\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
"},{"location":"addons/aws-for-fluentbit/#verify-the-fluent-bit-setup","title":"Verify the Fluent Bit setup","text":"Verify aws-for-fluentbit pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\naws-for-fluent-bit-6kp66 1/1 Running 0 172m\n
Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/
In the navigation pane, choose Log groups.
Make sure that you're in the Region where you deployed Fluent Bit.
Check the list of log groups in the Region. You should see the following:
/aws/containerinsights/Cluster_Name/application\n\n/aws/containerinsights/Cluster_Name/host\n\n/aws/containerinsights/Cluster_Name/dataplane\n
Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.
There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.
"},{"location":"addons/aws-fsx-csi-driver/","title":"AWS FSx CSI Driver","text":"This add-on deploys the Amazon FSx CSI Driver in to an Amazon EKS Cluster.
"},{"location":"addons/aws-fsx-csi-driver/#usage","title":"Usage","text":"The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.
enable_aws_fsx_csi_driver = true\n
"},{"location":"addons/aws-fsx-csi-driver/#helm-chart-customization","title":"Helm Chart customization","text":"You can optionally customize the Helm chart deployment using a configuration like the following.
enable_aws_fsx_csi_driver = true\naws_fsx_csi_driver = {\nnamespace = \"aws-fsx-csi-driver\"\nchart_version = \"1.6.0\"\nrole_policies = <ADDITIONAL_IAM_POLICY_ARN>\n}\n
You can find all available Helm Chart parameter values here
"},{"location":"addons/aws-fsx-csi-driver/#validation","title":"Validation","text":"Once deployed, you will be able to see a number of supporting resources in the kube-system
namespace.
$ kubectl -n kube-system get deployment fsx-csi-controller\n\nNAME READY UP-TO-DATE AVAILABLE AGE\nfsx-csi-controller 2/2 2 2 4m29s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-controller\nNAME READY STATUS RESTARTS AGE\nfsx-csi-controller-56c6d9bbb8-89cpc 4/4 Running 0 3m30s\nfsx-csi-controller-56c6d9bbb8-9wnlh 4/4 Running 0 3m30s\n
$ kubectl -n kube-system get daemonset fsx-csi-node\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\nfsx-csi-node 3 3 3 3 3 kubernetes.io/os=linux 5m27s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-node\nNAME READY STATUS RESTARTS AGE\nfsx-csi-node-7c5z6 3/3 Running 0 5m29s\nfsx-csi-node-d5q28 3/3 Running 0 5m29s\nfsx-csi-node-hlg8q 3/3 Running 0 5m29s\n
Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.
$ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n name: fsx-sc\nprovisioner: fsx.csi.aws.com\nparameters:\n subnetId: <YOUR_SUBNET_IDs>\n securityGroupIds: <YOUR_SG_ID>\n perUnitStorageThroughput: \"200\"\n deploymentType: PERSISTENT_1\nmountOptions:\n - flock\nEOF\n
$ kubect describe storageclass fsx-sc\nName: fsx-sc\nIsDefaultClass: No\nAnnotations: kubectl.kubernetes.io/last-applied-configuration={\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{},\"name\":\"fsx-sc\"},\"mountOptions\":null,\"parameters\":{\"deploymentType\":\"PERSISTENT_1\",\"perUnitStorageThroughput\":\"200\",\"securityGroupIds\":\"sg-q1w2e3r4t5y6u7i8o\",\"subnetId\":\"subnet-q1w2e3r4t5y6u7i8o\"},\"provisioner\":\"fsx.csi.aws.com\"}\nProvisioner: fsx.csi.aws.com\nParameters: deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o\nAllowVolumeExpansion: <unset>\nMountOptions: <none>\nReclaimPolicy: Delete\nVolumeBindingMode: Immediate\nEvents: <none>\n
Create a PVC.
$ cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: fsx-claim\nspec:\n accessModes:\n - ReadWriteMany\n storageClassName: fsx-sc\n resources:\n requests:\n storage: 1200Gi\nEOF\n
Wait for the PV to be created and bound to your PVC.
$ kubectl get pvc\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nfsx-claim Bound pvc-df385730-72d6-4b0c-8275-cc055a438760 1200Gi RWX fsx-sc 7m47s\n$ kubectl get pv\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE\npvc-df385730-72d6-4b0c-8275-cc055a438760 1200Gi RWX Delete Bound default/fsx-claim fsx-sc 2m13s\n
"},{"location":"addons/aws-gateway-api-controller/","title":"AWS Gateway API Controller","text":"AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.
"},{"location":"addons/aws-gateway-api-controller/#usage","title":"Usage","text":"AWS Gateway API Controller can be deployed by enabling the add-on via the following.
enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nset = [{\nname = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n
You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.
enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nname = \"aws-gateway-api-controller\"\nchart_version = \"v0.0.12\"\nrepository = \"oci://public.ecr.aws/aws-application-networking-k8s\"\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nnamespace = \"aws-application-networking-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\nset = [{\nname = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n
Verify aws-gateway-api-controller pods are running.
$ kubectl get pods -n aws-application-networking-system\nNAME READY STATUS RESTARTS AGE\naws-gateway-api-controller-aws-gateway-controller-chart-8f42q426 1/1 Running 0 40s\naws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g 1/1 Running 0 71s\n
Deploy example GatewayClass
$ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml\ngatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created\n
Describe GatewayClass
$ kubectl describe gatewayclass\nName: amazon-vpc-lattice\nNamespace:\nLabels: <none>\nAnnotations: <none>\nAPI Version: gateway.networking.k8s.io/v1beta1\nKind: GatewayClass\nMetadata:\n Creation Timestamp: 2023-06-22T22:33:32Z\n Generation: 1\nResource Version: 819021\nUID: aac59195-8f37-4c23-a2a5-b0f363deda77\nSpec:\n Controller Name: application-networking.k8s.aws/gateway-api-controller\nStatus:\n Conditions:\n Last Transition Time: 2023-06-22T22:33:32Z\n Message: Accepted\n Observed Generation: 1\nReason: Accepted\n Status: True\n Type: Accepted\nEvents: <none>\n
"},{"location":"addons/aws-load-balancer-controller/","title":"AWS Load Balancer Controller.","text":"AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.
"},{"location":"addons/aws-load-balancer-controller/#usage","title":"Usage","text":"In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_aws_load_balancer_controller = true\naws_load_balancer_controller = {\nset = [\n{\nname = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n]\n}\n
"},{"location":"addons/aws-load-balancer-controller/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller
configuration block:
aws_load_balancer_controller = {\nset = [\n{\nname = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n{\nname = \"resources.requests.cpu\"\nvalue = \"100m\"\n},\n{\nname = \"resources.requests.memory\"\nvalue = \"128Mi\"\n},\n]\n}\n
You can find all available Helm Chart parameter values here.
"},{"location":"addons/aws-load-balancer-controller/#validate","title":"Validate","text":"aws-load-balancer-controller
Pods were created in the kube-system
Namespace, as the following example.kubectl -n kube-system get pods | grep aws-load-balancer-controller\nNAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system aws-load-balancer-controller-6cbdb58654-fvskt 1/1 Running 0 26m\nkube-system aws-load-balancer-controller-6cbdb58654-sc7dk 1/1 Running 0 26m\n
alb
IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc
.kubectl create ingress example-ingress --class alb --rule=\"/*=example-svc:80\" \\\n--annotation alb.ingress.kubernetes.io/scheme=internet-facing \\\n--annotation alb.ingress.kubernetes.io/target-type=ip\n
kubectl get ingress NAME CLASS HOSTS ADDRESS PORTS AGE\nexample-ingress alb * k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com 80 4m9s\n
"},{"location":"addons/aws-load-balancer-controller/#resources","title":"Resources","text":"GitHub Repo Helm Chart AWS Docs
"},{"location":"addons/aws-node-termination-handler/","title":"AWS Node Termination Handler","text":"This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.
"},{"location":"addons/aws-node-termination-handler/#usage","title":"Usage","text":"AWS Node Termination Handler can be deployed by enabling the add-on via the following.
enable_aws_node_termination_handler = true\n
You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.
enable_aws_node_termination_handler = true\naws_node_termination_handler = {\nname = \"aws-node-termination-handler\"\nchart_version = \"0.21.0\"\nrepository = \"https://aws.github.io/eks-charts\"\nnamespace = \"aws-node-termination-handler\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify aws-node-termination-handler pods are running.
$ kubectl get pods -n aws-node-termination-handler\nNAME READY STATUS RESTARTS AGE\naws-node-termination-handler-6f598b6b89-6mqgk 1/1 Running 1 (22h ago) 26h\n
Verify SQS Queue is created.
$ aws sqs list-queues\n\n{\n\"QueueUrls\": [\n\"https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004\"\n]\n}\n
Verify Event Rules are created.
$ aws event list-rules\n{\n[\n{\n\"Name\": \"NTH-ASGTerminiate-20230602191740664900000025\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"Auto scaling instance terminate event\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-HealthEvent-20230602191740079300000022\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"AWS health event\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-InstanceRebalance-20230602191740077100000021\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 instance rebalance recommendation\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-InstanceStateChange-20230602191740165000000024\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 instance state-change notification\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-SpotInterrupt-20230602191740077100000020\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 spot instance interruption warning\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHASGTermRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHInstanceStateChangeRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHRebalanceRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHScheduledChangeRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHSpotTermRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance 
Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n}\n]\n}\n
"},{"location":"addons/aws-private-ca-issuer/","title":"AWS Private CA Issuer","text":"AWS Private CA is an AWS service that can setup and manage private CAs, as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager that signs off certificate requests using AWS Private CA in an Amazon EKS Cluster.
"},{"location":"addons/aws-private-ca-issuer/#usage","title":"Usage","text":""},{"location":"addons/aws-private-ca-issuer/#pre-requisites","title":"Pre-requisites","text":"To deploy the AWS PCA, you need to install cert-manager first, refer to this documentation to do it through EKS Blueprints Addons.
"},{"location":"addons/aws-private-ca-issuer/#deployment","title":"Deployment","text":"With cert-manager deployed in place, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_cert_manager = true\nenable_aws_privateca_issuer = true\naws_privateca_issuer = {\nacmca_arn = aws_acmpca_certificate_authority.this.arn\n}\n}\n
"},{"location":"addons/aws-private-ca-issuer/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller
configuration block:
aws_privateca_issuer = {\nacmca_arn = aws_acmpca_certificate_authority.this.arn\nnamespace = \"aws-privateca-issuer\"\ncreate_namespace = true\n}\n
You can find all available Helm Chart parameter values here.
"},{"location":"addons/aws-private-ca-issuer/#validation","title":"Validation","text":"aws-privateca-issuer
and cert-manager
Namespace.kubectl get pods -n aws-privateca-issuer\nkubectl get pods -n cert-manager\n
certificate
status in it should be in Ready
state, and be pointing to a secret
created in the same Namespace.kubectl get certificate -o wide\nNAME READY SECRET ISSUER STATUS AGE\nexample True example-clusterissuer tls-with-aws-pca-issuer Certificate is up to date and has not expired 41m\n\nkubectl get secret example-clusterissuer\nNAME TYPE DATA AGE\nexample-clusterissuer kubernetes.io/tls 3 43m\n
"},{"location":"addons/aws-private-ca-issuer/#resources","title":"Resources","text":"GitHub Repo Helm Chart AWS Docs
"},{"location":"addons/cert-manager/","title":"Cert-Manager","text":"Cert-manager is a X.509 certificate controller for Kubernetes-like workloads. It will obtain certificates from a variety of Issuers, both popular public Issuers as well as private Issuers, and ensure the certificates are valid and up-to-date, and will attempt to renew certificates at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.
"},{"location":"addons/cert-manager/#usage","title":"Usage","text":"To deploy cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_cert_manager = true\n}\n
"},{"location":"addons/cert-manager/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the cert-manager
configuration block:
cert-manager = {\nchart_version = \"v1.11.1\"\nnamespace = \"cert-manager\"\ncreate_namespace = true\n}\n
You can find all available Helm Chart parameter values here
"},{"location":"addons/cert-manager/#validation","title":"Validation","text":"kubectl -n cert-manager get pods\nNAME READY STATUS RESTARTS AGE\ncert-manager-5989bcc87-96qvf 1/1 Running 0 2m49s\ncert-manager-cainjector-9b44ddb68-8c7b9 1/1 Running 0 2m49s\ncert-manager-webhook-776b65456-k6br4 1/1 Running 0 2m49s\n
apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\nname: selfsigned-cluster-issuer\nspec:\nselfSigned: {}\n
kubectl get clusterissuers -o wide selfsigned-cluster-issuer\nNAME READY STATUS AGE\nselfsigned-cluster-issuer True 3m\n
apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: example\nnamespace: default\nspec:\nisCA: true\ncommonName: example\nsecretName: example-secret\nprivateKey:\nalgorithm: ECDSA\nsize: 256\nissuerRef:\nname: selfsigned-cluster-issuer\nkind: ClusterIssuer\ngroup: cert-manager.io\n
certificate
status in it should be in Ready
state, and be pointing to a secret
created in the same Namespace.kubectl get certificate -o wide\nNAME READY SECRET ISSUER STATUS AGE\nexample True example-secret selfsigned-cluster-issuer Certificate is up to date and has not expired 44s\n\nkubectl get secret example-secret\nNAME TYPE DATA AGE\nexample-secret kubernetes.io/tls 3 70s\n
"},{"location":"addons/cert-manager/#resources","title":"Resources","text":"GitHub Repo Helm Chart
"},{"location":"addons/cluster-autoscaler/","title":"Cluster Autoscaler","text":"The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.
"},{"location":"addons/cluster-autoscaler/#usage","title":"Usage","text":"Cluster Autoscaler can be deployed by enabling the add-on via the following.
enable_cluster_autoscaler = true\n
You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.
enable_cluster_autoscaler = true\ncluster_autoscaler = {\nname = \"cluster-autoscaler\"\nchart_version = \"9.29.0\"\nrepository = \"https://kubernetes.github.io/autoscaler\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify cluster-autoscaler pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\ncluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9 1/1 Running 1 (2d ago) 2d5h\n
"},{"location":"addons/cluster-proportional-autoscaler/","title":"Cluster Proportional Autoscaler","text":"Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.
The cluster-proportional-autoscaler helps to scale the applications using deployment or replicationcontroller or replicaset. This is an alternative solution to Horizontal Pod Autoscaling. It is typically installed as a Deployment in your cluster.
Refer to the eks-best-practices-guides for addional configuration guidanance.
"},{"location":"addons/cluster-proportional-autoscaler/#usage","title":"Usage","text":"This add-on requires both enable_cluster_proportional_autoscaler
and cluster_proportional_autoscaler
as mandatory fields.
The example shows how to enable cluster-proportional-autoscaler
for CoreDNS Deployment
. CoreDNS deployment is not configured with HPA. So, this add-on helps to scale CoreDNS Add-on according to the size of the nodes and cores.
This Add-on can be used to scale any application with Deployment objects.
enable_cluster_proportional_autoscaler = true\ncluster_proportional_autoscaler = {\nvalues = [\n<<-EOT\n nameOverride: kube-dns-autoscaler\n # Formula for controlling the replicas. Adjust according to your needs\n # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )\n config:\n linear:\n coresPerReplica: 256\n nodesPerReplica: 16\n min: 1\n max: 100\n preventSinglePointFailure: true\n includeUnschedulableNodes: true\n # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).\n options:\n target: deployment/coredns # Notice the target as `deployment/coredns`\n serviceAccount:\n create: true\n name: kube-dns-autoscaler\n podSecurityContext:\n seccompProfile:\n type: RuntimeDefault\n supplementalGroups: [65534]\n fsGroup: 65534\n resources:\n limits:\n cpu: 100m\n memory: 128Mi\n requests:\n cpu: 100m\n memory: 128Mi\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n description: \"Cluster Proportional Autoscaler for CoreDNS Service\"\n EOT\n]\n}\n
"},{"location":"addons/cluster-proportional-autoscaler/#expected-result","title":"Expected result","text":"The cluster-proportional-autoscaler
pod running in the kube-system
namespace.
kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler\nNAME READY STATUS RESTARTS AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7 1/1 Running 0 21h\n
The cluster-proportional-autoscaler-kube-dns-autoscaler
config map exists. kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler\nNAME DATA AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler 1 21h\n
"},{"location":"addons/cluster-proportional-autoscaler/#testing","title":"Testing","text":"To test that coredns
pods scale, first take a baseline of how many nodes the cluster has and how many coredns
pods are running.
kubectl get nodes\nNAME STATUS ROLES AGE VERSION\nip-10-0-19-243.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME READY STATUS RESTARTS AGE\ncoredns-7975d6fb9b-dlkdd 1/1 Running 0 21h\ncoredns-7975d6fb9b-xqqwp 1/1 Running 0 21h\n
Change the following parameters in the HCL code above so that a scaling event can be easily triggered:
config:\nlinear:\ncoresPerReplica: 4\nnodesPerReplica: 2\nmin: 1\nmax: 4\n
and execute terraform apply
. Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.
Check that the new node came up and coredns
scaled up.
NAME STATUS ROLES AGE VERSION\nip-10-0-14-120.ec2.internal Ready <none> 10m v1.26.4-eks-0a21954\nip-10-0-19-243.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME READY STATUS RESTARTS AGE\ncoredns-7975d6fb9b-dlkdd 1/1 Running 0 21h\ncoredns-7975d6fb9b-ww64t 1/1 Running 0 10m\ncoredns-7975d6fb9b-xqqwp 1/1 Running 0 21h\n
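With the test values above (coresPerReplica: 4, nodesPerReplica: 2), the linear formula replicas = max( ceil( cores * 1/coresPerReplica ), ceil( nodes * 1/nodesPerReplica ) ) evaluates to ceil( 5 * 1/2 ) = 3 for the node term once the fifth node joins, which matches the three coredns replicas shown above (assuming the cores term does not exceed the node term for these instance sizes).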
"},{"location":"addons/external-dns/","title":"External DNS","text":"ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly\u2014e.g. AWS Route 53.
"},{"location":"addons/external-dns/#usage","title":"Usage","text":"External DNS can be deployed by enabling the add-on via the following.
enable_external_dns = true\n
You can optionally customize the Helm chart that deploys External DNS via the following configuration.
enable_external_dns = true\nexternal_dns = {\nname = \"external-dns\"\nchart_version = \"1.12.2\"\nrepository = \"https://kubernetes-sigs.github.io/external-dns/\"\nnamespace = \"external-dns\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\nexternal_dns_route53_zone_arns = [\"XXXXXXXXXXXXXXXXXXXXXXX\"]\n
Verify external-dns pods are running.
$ kubectl get pods -n external-dns\nNAME READY STATUS RESTARTS AGE\nexternal-dns-849b89c675-ffnf6 1/1 Running 1 (2d ago) 2d5h\n
To further configure external-dns, refer to the examples:
External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.
"},{"location":"addons/external-secrets/#usage","title":"Usage","text":"External Secrets can be deployed by enabling the add-on via the following.
enable_external_secrets = true\n
You can optionally customize the Helm chart that deploys External Secrets via the following configuration.
enable_external_secrets = true\nexternal_secrets = {\nname = \"external-secrets\"\nchart_version = \"0.8.1\"\nrepository = \"https://charts.external-secrets.io\"\nnamespace = \"external-secrets\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify external-secrets pods are running.
$ kubectl get pods -n external-secrets\nNAME READY STATUS RESTARTS AGE\nexternal-secrets-67bfd5b47c-xc5xf 1/1 Running 1 (2d1h ago) 2d6h\nexternal-secrets-cert-controller-8f75c6f79-qcfx4 1/1 Running 1 (2d1h ago) 2d6h\nexternal-secrets-webhook-78f6bd456-76wmm 1/1 Running 1 (2d1h ago) 2d6h\n
"},{"location":"addons/fargate-fluentbit/","title":"Fargate FluentBit","text":"Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar, but Amazon runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated ConfigMap, that is deployed via this Add-on.
"},{"location":"addons/fargate-fluentbit/#usage","title":"Usage","text":"To configure the Fargate Fluentbit ConfigMap via the EKS Blueprints Addons, just reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_fargate_fluentbit = true\nfargate_fluentbit = {\nflb_log_cw = true\n}\n}\n
It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group
configuration block:
fargate_fluentbit_cw_log_group = {\nname = \"existing-log-group\"\nname_prefix = \"dev-environment-logs\"\nretention_in_days = 7\nkms_key_id = \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\nskip_destroy = true\n}\n
"},{"location":"addons/fargate-fluentbit/#validation","title":"Validation","text":"aws-logging
configMap for Fargate Fluentbit was created.kubectl -n aws-observability get configmap aws-logging -o yaml\napiVersion: v1\ndata:\n filters.conf: |\n[FILTER]\nName parser\n Match *\n Key_Name log\n Parser regex\n Preserve_Key True\n Reserve_Data True\n flb_log_cw: \"true\"\noutput.conf: |\n[OUTPUT]\nName cloudwatch_logs\n Match *\n region us-west-2\n log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\n log_stream_prefix fargate-logs-\n auto_create_group true\nparsers.conf: |\n[PARSER]\nName regex\n Format regex\n Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$\n Time_Key time\nTime_Format %Y-%m-%dT%H:%M:%S.%L%z\n Time_Keep On\n Decode_Field_As json message\nimmutable: false\nkind: ConfigMap\nmetadata:\n creationTimestamp: \"2023-05-08T21:14:52Z\"\nname: aws-logging\n namespace: aws-observability\n resourceVersion: \"1795\"\nuid: d822bcf5-a441-4996-857e-7fb1357bc07e\n
aws logs describe-log-groups --log-group-name-prefix \"/fargate-serverless/fargate-fluentbit\"\n{\n\"logGroups\": [\n{\n\"logGroupName\": \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\",\n \"creationTime\": 1683580491652,\n \"retentionInDays\": 90,\n \"metricFilterCount\": 0,\n \"arn\": \"arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*\",\n \"storedBytes\": 0\n}\n]\n}\n
aws logs describe-log-streams --log-group-name \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'\n[\n\"fargate-logs-flblogs.var.log.fluent-bit.log\",\n \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log\",\n \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log\",\n \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log\",\n \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log\"\n]\n
"},{"location":"addons/fargate-fluentbit/#resources","title":"Resources","text":"AWS Docs Fluent Bit for Amazon EKS on AWS Fargate Blog Post
"},{"location":"addons/ingress-nginx/","title":"Ingress Nginx","text":"This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.
Other than handling Kubernetes ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL Path (path based routing).
"},{"location":"addons/ingress-nginx/#usage","title":"Usage","text":"Ingress Nginx Controller can be deployed by enabling the add-on via the following.
enable_ingress_nginx = true\n
You can optionally customize the Helm chart that deploys ingress-nginx
via the following configuration.
enable_ingress_nginx = true\ningress_nginx = {\nname = \"ingress-nginx\"\nchart_version = \"4.6.1\"\nrepository = \"https://kubernetes.github.io/ingress-nginx\"\nnamespace = \"ingress-nginx\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify ingress-nginx pods are running.
$ kubectl get pods -n ingress-nginx\nNAME READY STATUS RESTARTS AGE\ningress-nginx-controller-f6c55fdc8-8bt2z 1/1 Running 0 44m\n
"},{"location":"addons/karpenter/","title":"Karpenter","text":""},{"location":"addons/karpenter/#prerequisites","title":"Prerequisites","text":"If deploying a node template that uses spot
, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:
aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true\n
"},{"location":"addons/karpenter/#validate","title":"Validate","text":"The following command will update the kubeconfig
on your local machine and allow you to interact with your EKS Cluster using kubectl
to validate the Karpenter deployment running on Fargate.
update-kubeconfig
command:aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
kubectl get pods -n karpenter\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\nkarpenter-6f97df4f77-5nqsk 1/1 Running 0 3m28s\nkarpenter-6f97df4f77-n7fkf 1/1 Running 0 3m28s\n
kubectl get nodes\n\n# Output should look similar to below\nNAME STATUS ROLES AGE VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 2m56s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 2m57s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 2m34s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 2m33s v1.26.3-eks-f4dc2c0\n
pause
deployment to demonstrate scaling:kubectl apply -f - <<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: inflate\nspec:\n replicas: 0\n selector:\n matchLabels:\n app: inflate\n template:\n metadata:\n labels:\n app: inflate\n spec:\n terminationGracePeriodSeconds: 0\n containers:\n - name: inflate\n image: public.ecr.aws/eks-distro/kubernetes/pause:3.7\n resources:\n requests:\n cpu: 1\nEOF\n
pause
deployment to see Karpenter respond by provisioning nodes to support the workload:kubectl scale deployment inflate --replicas 5\n# To view logs\n# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller\n
kubectl get nodes\n\n# Output should look similar to below\nNAME STATUS ROLES AGE VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 5m15s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 5m16s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 4m53s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 4m52s v1.26.3-eks-f4dc2c0\nip-10-0-1-184.us-west-2.compute.internal Ready <none> 26s v1.26.2-eks-a59e1f0 # <= new EC2 node launched\n
pause
deployment:kubectl delete deployment inflate\n
"},{"location":"addons/kube-prometheus-stack/","title":"Kube Prometheus Stack","text":"Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
"},{"location":"addons/kube-prometheus-stack/#usage","title":"Usage","text":"Kube Prometheus Stack can be deployed by enabling the add-on via the following.
enable_kube_prometheus_stack = true\n
You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.
enable_kube_prometheus_stack = true\nkube_prometheus_stack = {\nname = \"kube-prometheus-stack\"\nchart_version = \"45.10.1\"\nrepository = \"https://prometheus-community.github.io/helm-charts\"\nnamespace = \"kube-prometheus-stack\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify kube-prometheus-stack pods are running.
$ kubectl get pods -n kube-prometheus-stack\nNAME READY STATUS RESTARTS AGE\nalertmanager-kube-prometheus-stack-alertmanager-0 2/2 Running 3 (2d2h ago) 2d7h\nkube-prometheus-stack-grafana-5c6cf88fd9-8wc9k 3/3 Running 3 (2d2h ago) 2d7h\nkube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d 1/1 Running 1 (2d2h ago) 2d7h\nkube-prometheus-stack-operator-c74ddccb5-8cprr 1/1 Running 1 (2d2h ago) 2d7h\nkube-prometheus-stack-prometheus-node-exporter-vd8lw 1/1 Running 1 (2d2h ago) 2d7h\nprometheus-kube-prometheus-stack-prometheus-0 2/2 Running 2 (2d2h ago) 2d7h\n
"},{"location":"addons/metrics-server/","title":"Metrics Server","text":"Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.
Metrics Server collects resource metrics from Kubelets and exposes them in Kubernetes apiserver through Metrics API for use by Horizontal Pod Autoscaler and Vertical Pod Autoscaler. Metrics API can also be accessed by kubectl top, making it easier to debug autoscaling pipelines.
"},{"location":"addons/metrics-server/#usage","title":"Usage","text":"Metrics Server can be deployed by enabling the add-on via the following.
enable_metrics_server = true\n
You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.
enable_metrics_server = true\nmetrics_server = {\nname = \"metrics-server\"\nchart_version = \"3.10.0\"\nrepository = \"https://kubernetes-sigs.github.io/metrics-server/\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify metrics-server pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\nmetrics-server-6f9cdd486c-njh8b 1/1 Running 1 (2d2h ago) 2d7h\n
"},{"location":"addons/opa-gatekeeper/","title":"OPA Gatekeeper","text":"Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.
For complete project documentation, please visit the Gatekeeper documentation. For reference templates, refer to Templates.
"},{"location":"addons/opa-gatekeeper/#usage","title":"Usage","text":"Gatekeeper can be deployed by enabling the add-on via the following.
enable_gatekeeper = true\n
You can also customize the Helm chart that deploys gatekeeper
via the following configuration:
enable_gatekeeper = true\ngatekeeper = {\nname = \"gatekeeper\"\nchart_version = \"3.12.0\"\nrepository = \"https://open-policy-agent.github.io/gatekeeper/charts\"\nnamespace = \"gatekeeper-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
"},{"location":"addons/secrets-store-csi-driver-provider-aws/","title":"AWS Secrets Manager and Config Provider for Secret Store CSI Driver","text":"AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.
"},{"location":"addons/secrets-store-csi-driver-provider-aws/#usage","title":"Usage","text":"AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.
enable_secrets_store_csi_driver = true\nenable_secrets_store_csi_driver_provider_aws = true\n
You can optionally customize the Helm chart via the following configuration.
enable_secrets_store_csi_driver = true\nenable_secrets_store_csi_driver_provider_aws = true\nsecrets_store_csi_driver_provider_aws = {\nname = \"secrets-store-csi-driver\"\nchart_version = \"0.3.2\"\nrepository = \"https://aws.github.io/secrets-store-csi-driver-provider-aws\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify secrets-store-csi-driver pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\nsecrets-store-csi-driver-9l2z8 3/3 Running 1 (2d5h ago) 2d9h\nsecrets-store-csi-driver-provider-aws-2qqkk 1/1 Running 1 (2d5h ago) 2d9h\n
"},{"location":"addons/velero/","title":"Velero","text":"Velero is an open source tool to safely backup and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.
Velero can be deployed by enabling the add-on via the following.
enable_velero = true\nvelero_backup_s3_bucket = \"<YOUR_BUCKET_NAME>\"\nvelero = {\ns3_backup_location = \"<YOUR_S3_BUCKET_ARN>[/prefix]\"\n}\n
You can also customize the Helm chart that deploys velero
via the following configuration:
enable_velero = true\nvelero = {\nname = \"velero\"\ndescription = \"A Helm chart for velero\"\nchart_version = \"3.1.6\"\nrepository = \"https://vmware-tanzu.github.io/helm-charts/\"\nnamespace = \"velero\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
To see a working example, see the stateful
example blueprint.
update-kubeconfig
command:aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
kubectl get all -n velero\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\npod/velero-7b8994d56-z89sl 1/1 Running 0 25h\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/velero ClusterIP 172.20.20.118 <none> 8085/TCP 25h\n\nNAME READY UP-TO-DATE AVAILABLE AGE\ndeployment.apps/velero 1/1 1 1 25h\n\nNAME DESIRED CURRENT READY AGE\nreplicaset.apps/velero-7b8994d56 1 1 1 25h\n
velero backup-location get\n\n# Output should look similar to below\nNAME PROVIDER BUCKET/PREFIX PHASE LAST VALIDATED ACCESS MODE DEFAULT\ndefault aws stateful-20230503175301619800000005/backups Available 2023-05-04 15:15:00 -0400 EDT ReadWrite true\n
kubectl create namespace backupdemo\nkubectl run nginx --image=nginx -n backupdemo\n
velero backup create backup1 --include-namespaces backupdemo\n\n# Output should look similar to below\nBackup request \"backup1\" submitted successfully.\nRun `velero backup describe backup1` or `velero backup logs backup1` for more details.\n
velero backup describe backup1\n\n# Output should look similar to below\nName: backup1\nNamespace: velero\nLabels: velero.io/storage-location=default\nAnnotations: velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0\n velero.io/source-cluster-k8s-major-version=1\nvelero.io/source-cluster-k8s-minor-version=26+\n\nPhase: Completed\n\nNamespaces:\n Included: backupdemo\n Excluded: <none>\n\nResources:\n Included: *\n Excluded: <none>\n Cluster-scoped: auto\n\nLabel selector: <none>\n\nStorage Location: default\n\nVelero-Native Snapshot PVs: auto\n\nTTL: 720h0m0s\n\nCSISnapshotTimeout: 10m0s\nItemOperationTimeout: 0s\n\nHooks: <none>\n\nBackup Format Version: 1.1.0\n\nStarted: 2023-05-04 15:16:31 -0400 EDT\nCompleted: 2023-05-04 15:16:33 -0400 EDT\n\nExpiration: 2023-06-03 15:16:31 -0400 EDT\n\nTotal items to be backed up: 9\nItems backed up: 9\nVelero-Native Snapshots: <none included>\n
kubectl delete namespace backupdemo\n
velero restore create --from-backup backup1\n
kubectl get all -n backupdemo\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\npod/nginx 1/1 Running 0 21s\n
"},{"location":"addons/vertical-pod-autoscaler/","title":"Vertical Pod Autoscaler","text":"VPA Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help \"right size\" your applications. When configured, it will automatically request the necessary reservations based on usage and thus allow proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in initial container configuration.
NOTE: The Metrics Server add-on is a dependency for this add-on.
"},{"location":"addons/vertical-pod-autoscaler/#usage","title":"Usage","text":"This step deploys the Vertical Pod Autoscaler with default Helm Chart config
enable_vpa = true\nenable_metrics_server = true\n
You can also customize the Helm chart that deploys vpa
via the following configuration:
enable_vpa = true\nenable_metrics_server = true\nvpa = {\nname = \"vpa\"\nchart_version = \"1.7.5\"\nrepository = \"https://charts.fairwinds.com/stable\"\nnamespace = \"vpa\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":""},{"location":"#amazon-eks-blueprints-addons","title":"Amazon EKS Blueprints Addons","text":"Terraform module to deploy Kubernetes addons on Amazon EKS clusters.
"},{"location":"#usage","title":"Usage","text":"module \"eks\" {\nsource = \"terraform-aws-modules/eks/aws\"\ncluster_name = \"my-cluster\"\ncluster_version = \"1.27\"\n... truncated for brevity\n}\nmodule \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\nversion = \"~> 1.0\" #ensure to update this to the latest/desired version\ncluster_name = module.eks.cluster_name\ncluster_endpoint = module.eks.cluster_endpoint\ncluster_version = module.eks.cluster_version\noidc_provider_arn = module.eks.oidc_provider_arn\neks_addons = {\naws-ebs-csi-driver = {\nmost_recent = true\n}\ncoredns = {\nmost_recent = true\n}\nvpc-cni = {\nmost_recent = true\n}\nkube-proxy = {\nmost_recent = true\n}\n}\nenable_aws_load_balancer_controller = true\nenable_cluster_proportional_autoscaler = true\nenable_karpenter = true\nenable_kube_prometheus_stack = true\nenable_metrics_server = true\nenable_external_dns = true\nenable_cert_manager = true\ncert_manager_route53_hosted_zone_arns = [\"arn:aws:route53:::hostedzone/XXXXXXXXXXXXX\"]\ntags = {\nEnvironment = \"dev\"\n}\n}\n
"},{"location":"#requirements","title":"Requirements","text":"Name Version terraform >= 1.0 aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#providers","title":"Providers","text":"Name Version aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#modules","title":"Modules","text":"Name Source Version argo_events aws-ia/eks-blueprints-addon/aws 1.1.0 argo_rollouts aws-ia/eks-blueprints-addon/aws 1.1.0 argo_workflows aws-ia/eks-blueprints-addon/aws 1.1.0 argocd aws-ia/eks-blueprints-addon/aws 1.1.0 aws_cloudwatch_metrics aws-ia/eks-blueprints-addon/aws 1.1.0 aws_efs_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 aws_for_fluentbit aws-ia/eks-blueprints-addon/aws 1.1.0 aws_fsx_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 aws_gateway_api_controller aws-ia/eks-blueprints-addon/aws 1.1.0 aws_load_balancer_controller aws-ia/eks-blueprints-addon/aws 1.1.0 aws_node_termination_handler aws-ia/eks-blueprints-addon/aws 1.1.0 aws_node_termination_handler_sqs terraform-aws-modules/sqs/aws 4.0.1 aws_privateca_issuer aws-ia/eks-blueprints-addon/aws 1.1.0 cert_manager aws-ia/eks-blueprints-addon/aws 1.1.0 cluster_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.0 cluster_proportional_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.0 external_dns aws-ia/eks-blueprints-addon/aws 1.1.0 external_secrets aws-ia/eks-blueprints-addon/aws 1.1.0 gatekeeper aws-ia/eks-blueprints-addon/aws 1.1.0 ingress_nginx aws-ia/eks-blueprints-addon/aws 1.1.0 karpenter aws-ia/eks-blueprints-addon/aws 1.1.0 karpenter_sqs terraform-aws-modules/sqs/aws 4.0.1 kube_prometheus_stack aws-ia/eks-blueprints-addon/aws 1.1.0 metrics_server aws-ia/eks-blueprints-addon/aws 1.1.0 secrets_store_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.0 secrets_store_csi_driver_provider_aws aws-ia/eks-blueprints-addon/aws 1.1.0 velero aws-ia/eks-blueprints-addon/aws 1.1.0 vpa aws-ia/eks-blueprints-addon/aws 1.1.0"},{"location":"#resources","title":"Resources","text":"Name Type aws_autoscaling_group_tag.aws_node_termination_handler resource aws_autoscaling_lifecycle_hook.aws_node_termination_handler resource aws_cloudwatch_event_rule.aws_node_termination_handler resource aws_cloudwatch_event_rule.karpenter resource aws_cloudwatch_event_target.aws_node_termination_handler resource aws_cloudwatch_event_target.karpenter resource aws_cloudwatch_log_group.aws_for_fluentbit resource aws_cloudwatch_log_group.fargate_fluentbit resource aws_eks_addon.this resource aws_iam_instance_profile.karpenter resource aws_iam_policy.fargate_fluentbit resource aws_iam_role.karpenter resource aws_iam_role_policy_attachment.additional resource aws_iam_role_policy_attachment.karpenter resource helm_release.this resource kubernetes_config_map_v1.aws_logging resource kubernetes_namespace_v1.aws_observability resource time_sleep.this resource aws_caller_identity.current data source aws_eks_addon_version.this data source aws_iam_policy_document.aws_efs_csi_driver data source aws_iam_policy_document.aws_for_fluentbit data source aws_iam_policy_document.aws_fsx_csi_driver data source aws_iam_policy_document.aws_gateway_api_controller data source aws_iam_policy_document.aws_load_balancer_controller data source aws_iam_policy_document.aws_node_termination_handler data source aws_iam_policy_document.aws_privateca_issuer data source aws_iam_policy_document.cert_manager data source aws_iam_policy_document.cluster_autoscaler data source aws_iam_policy_document.external_dns data source aws_iam_policy_document.external_secrets data source 
aws_iam_policy_document.fargate_fluentbit data source aws_iam_policy_document.karpenter data source aws_iam_policy_document.karpenter_assume_role data source aws_iam_policy_document.velero data source aws_partition.current data source aws_region.current data source"},{"location":"#inputs","title":"Inputs","text":"Name Description Type Default Required argo_events Argo Events add-on configuration values any
{}
no argo_rollouts Argo Rollouts add-on configuration values any
{}
no argo_workflows Argo Workflows add-on configuration values any
{}
no argocd ArgoCD add-on configuration values any
{}
no aws_cloudwatch_metrics Cloudwatch Metrics add-on configuration values any
{}
no aws_efs_csi_driver EFS CSI Driver add-on configuration values any
{}
no aws_for_fluentbit AWS Fluentbit add-on configurations any
{}
no aws_for_fluentbit_cw_log_group AWS Fluentbit CloudWatch Log Group configurations any
{}
no aws_fsx_csi_driver FSX CSI Driver add-on configuration values any
{}
no aws_gateway_api_controller AWS Gateway API Controller add-on configuration values any
{}
no aws_load_balancer_controller AWS Load Balancer Controller add-on configuration values any
{}
no aws_node_termination_handler AWS Node Termination Handler add-on configuration values any
{}
no aws_node_termination_handler_asg_arns List of Auto Scaling group ARNs that AWS Node Termination Handler will monitor for EC2 events list(string)
[]
no aws_node_termination_handler_sqs AWS Node Termination Handler SQS queue configuration values any
{}
no aws_privateca_issuer AWS PCA Issuer add-on configurations any
{}
no cert_manager cert-manager add-on configuration values any
{}
no cert_manager_route53_hosted_zone_arns List of Route53 Hosted Zone ARNs that are used by cert-manager to create DNS records list(string)
[ \"arn:aws:route53:::hostedzone/*\"]no cluster_autoscaler Cluster Autoscaler add-on configuration values
any
{}
no cluster_endpoint Endpoint for your Kubernetes API server string
n/a yes cluster_name Name of the EKS cluster string
n/a yes cluster_proportional_autoscaler Cluster Proportional Autoscaler add-on configurations any
{}
no cluster_version Kubernetes <major>.<minor>
version to use for the EKS cluster (i.e.: 1.24
) string
n/a yes create_delay_dependencies Dependency attribute which must be resolved before starting the create_delay_duration
list(string)
[]
no create_delay_duration The duration to wait before creating resources string
\"30s\"
no eks_addons Map of EKS add-on configurations to enable for the cluster. Add-on name can be the map keys or set with name
any
{}
no eks_addons_timeouts Create, update, and delete timeout configurations for the EKS add-ons map(string)
{}
no enable_argo_events Enable Argo Events add-on bool
false
no enable_argo_rollouts Enable Argo Rollouts add-on bool
false
no enable_argo_workflows Enable Argo workflows add-on bool
false
no enable_argocd Enable Argo CD Kubernetes add-on bool
false
no enable_aws_cloudwatch_metrics Enable AWS Cloudwatch Metrics add-on for Container Insights bool
false
no enable_aws_efs_csi_driver Enable AWS EFS CSI Driver add-on bool
false
no enable_aws_for_fluentbit Enable AWS for FluentBit add-on bool
false
no enable_aws_fsx_csi_driver Enable AWS FSX CSI Driver add-on bool
false
no enable_aws_gateway_api_controller Enable AWS Gateway API Controller add-on bool
false
no enable_aws_load_balancer_controller Enable AWS Load Balancer Controller add-on bool
false
no enable_aws_node_termination_handler Enable AWS Node Termination Handler add-on bool
false
no enable_aws_privateca_issuer Enable AWS PCA Issuer bool
false
no enable_cert_manager Enable cert-manager add-on bool
false
no enable_cluster_autoscaler Enable Cluster autoscaler add-on bool
false
no enable_cluster_proportional_autoscaler Enable Cluster Proportional Autoscaler bool
false
no enable_external_dns Enable external-dns operator add-on bool
false
no enable_external_secrets Enable External Secrets operator add-on bool
false
no enable_fargate_fluentbit Enable Fargate FluentBit add-on bool
false
no enable_gatekeeper Enable Gatekeeper add-on bool
false
no enable_ingress_nginx Enable Ingress Nginx bool
false
no enable_karpenter Enable Karpenter controller add-on bool
false
no enable_kube_prometheus_stack Enable Kube Prometheus Stack bool
false
no enable_metrics_server Enable metrics server add-on bool
false
no enable_secrets_store_csi_driver Enable CSI Secrets Store Provider bool
false
no enable_secrets_store_csi_driver_provider_aws Enable AWS CSI Secrets Store Provider bool
false
no enable_velero Enable Velero add-on bool
false
no enable_vpa Enable Vertical Pod Autoscaler add-on bool
false
no external_dns external-dns add-on configuration values any
{}
no external_dns_route53_zone_arns List of Route53 zones ARNs which external-dns will have access to create/manage records (if using Route53) list(string)
[]
no external_secrets External Secrets add-on configuration values any
{}
no external_secrets_kms_key_arns List of KMS Key ARNs that are used by Secrets Manager that contain secrets to mount using External Secrets list(string)
[ \"arn:aws:kms:::key/*\"]no external_secrets_secrets_manager_arns List of Secrets Manager ARNs that contain secrets to mount using External Secrets
list(string)
[ \"arn:aws:secretsmanager:::secret:*\"]no external_secrets_ssm_parameter_arns List of Systems Manager Parameter ARNs that contain secrets to mount using External Secrets
list(string)
[ \"arn:aws:ssm:::parameter/*\"]no fargate_fluentbit Fargate fluentbit add-on config
any
{}
no fargate_fluentbit_cw_log_group AWS Fargate Fluentbit CloudWatch Log Group configurations any
{}
no gatekeeper Gatekeeper add-on configuration any
{}
no helm_releases A map of Helm releases to create. This provides the ability to pass in an arbitrary map of Helm chart definitions to create any
{}
no ingress_nginx Ingress Nginx add-on configurations any
{}
no karpenter Karpenter add-on configuration values any
{}
no karpenter_enable_spot_termination Determines whether to enable native node termination handling bool
true
no karpenter_node Karpenter IAM role and IAM instance profile configuration values any
{}
no karpenter_sqs Karpenter SQS queue for native node termination handling configuration values any
{}
no kube_prometheus_stack Kube Prometheus Stack add-on configurations any
{}
no metrics_server Metrics Server add-on configurations any
{}
no oidc_provider_arn The ARN of the cluster OIDC Provider string
n/a yes secrets_store_csi_driver CSI Secrets Store Provider add-on configurations any
{}
no secrets_store_csi_driver_provider_aws CSI Secrets Store Provider add-on configurations any
{}
no tags A map of tags to add to all resources map(string)
{}
no velero Velero add-on configuration values any
{}
no vpa Vertical Pod Autoscaler add-on configuration values any
{}
no"},{"location":"#outputs","title":"Outputs","text":"Name Description argo_events Map of attributes of the Helm release created argo_rollouts Map of attributes of the Helm release created argo_workflows Map of attributes of the Helm release created argocd Map of attributes of the Helm release created aws_cloudwatch_metrics Map of attributes of the Helm release and IRSA created aws_efs_csi_driver Map of attributes of the Helm release and IRSA created aws_for_fluentbit Map of attributes of the Helm release and IRSA created aws_fsx_csi_driver Map of attributes of the Helm release and IRSA created aws_gateway_api_controller Map of attributes of the Helm release and IRSA created aws_load_balancer_controller Map of attributes of the Helm release and IRSA created aws_node_termination_handler Map of attributes of the Helm release and IRSA created aws_privateca_issuer Map of attributes of the Helm release and IRSA created cert_manager Map of attributes of the Helm release and IRSA created cluster_autoscaler Map of attributes of the Helm release and IRSA created cluster_proportional_autoscaler Map of attributes of the Helm release and IRSA created eks_addons Map of attributes for each EKS addons enabled external_dns Map of attributes of the Helm release and IRSA created external_secrets Map of attributes of the Helm release and IRSA created fargate_fluentbit Map of attributes of the configmap and IAM policy created gatekeeper Map of attributes of the Helm release and IRSA created helm_releases Map of attributes of the Helm release created ingress_nginx Map of attributes of the Helm release and IRSA created karpenter Map of attributes of the Helm release and IRSA created kube_prometheus_stack Map of attributes of the Helm release and IRSA created metrics_server Map of attributes of the Helm release and IRSA created secrets_store_csi_driver Map of attributes of the Helm release and IRSA created secrets_store_csi_driver_provider_aws Map of attributes of the Helm release and IRSA created velero Map of attributes of the Helm release and IRSA created vpa Map of attributes of the Helm release and IRSA created"},{"location":"amazon-eks-addons/","title":"Amazon EKS Add-ons","text":"The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API; either native EKS addons or third party add-ons supplied via the AWS Marketplace.
See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as Additional Amazon EKS add-ons from independent software vendors.
"},{"location":"amazon-eks-addons/#architecture-support","title":"Architecture Support","text":"The Amazon EKS provided add-ons listed below support both x86_64/amd64
and arm64
architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64
and arm64
architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.
Addon x86_64/amd64 arm64 vpc-cni
\u2705 \u2705 aws-ebs-csi-driver
\u2705 \u2705 coredns
\u2705 \u2705 kube-proxy
\u2705 \u2705 adot
\u2705 \u2705 aws-guardduty-agent
\u2705 \u2705"},{"location":"amazon-eks-addons/#usage","title":"Usage","text":"The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons
argument which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:
module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\n<key> = {\nname = string # Optional - <key> is used if `name` is not set\nmost_recent = bool\naddon_version = string # overrides `most_recent` if set\nconfiguration_values = string # JSON string\npreserve = bool # defaults to `true`\nresolve_conflicts_on_create = string # defaults to `OVERWRITE`\nresolve_conflicts_on_update = string # defaults to `OVERWRITE`\ntimeouts = {\ncreate = string # optional\nupdate = string # optional\ndelete = string # optional\n}\ntags = map(string)\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#example","title":"Example","text":"module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\n # Amazon EKS add-ons\naws-ebs-csi-driver = {\nmost_recent = true\nservice_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn\n}\ncoredns = {\nmost_recent = true\ntimeouts = {\ncreate = \"25m\"\ndelete = \"10m\"\n}\n}\nvpc-cni = {\nmost_recent = true\nservice_account_role_arn = module.vpc_cni_irsa.iam_role_arn\n}\nkube-proxy = {}\n # Third party add-ons via AWS Marketplace\nkubecost_kubecost = {\nmost_recent = true\n}\nteleport_teleport = {\nmost_recent = true\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#configuration-values","title":"Configuration Values","text":"You can supply custom configuration values to each addon via the configuration_values
argument of the add-on definition. The value provided must be a JSON-encoded string and adhere to the JSON schema provided by the version of the add-on. You can view this schema using the AWS CLI by supplying the add-on name and version to the describe-addon-configuration
command:
aws eks describe-addon-configuration \\\n--addon-name coredns \\\n--addon-version v1.8.7-eksbuild.2 \\\n--query 'configurationSchema' \\\n--output text | jq\n
Which returns the formatted JSON schema like below:
{\n\"$ref\": \"#/definitions/Coredns\",\n\"$schema\": \"http://json-schema.org/draft-06/schema#\",\n\"definitions\": {\n\"Coredns\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"computeType\": {\n\"type\": \"string\"\n},\n\"corefile\": {\n\"description\": \"Entire corefile contents to use with installation\",\n\"type\": \"string\"\n},\n\"nodeSelector\": {\n\"additionalProperties\": {\n\"type\": \"string\"\n},\n\"type\": \"object\"\n},\n\"replicaCount\": {\n\"type\": \"integer\"\n},\n\"resources\": {\n\"$ref\": \"#/definitions/Resources\"\n}\n},\n\"title\": \"Coredns\",\n\"type\": \"object\"\n},\n\"Limits\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"cpu\": {\n\"type\": \"string\"\n},\n\"memory\": {\n\"type\": \"string\"\n}\n},\n\"title\": \"Limits\",\n\"type\": \"object\"\n},\n\"Resources\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"limits\": {\n\"$ref\": \"#/definitions/Limits\"\n},\n\"requests\": {\n\"$ref\": \"#/definitions/Limits\"\n}\n},\n\"title\": \"Resources\",\n\"type\": \"object\"\n}\n}\n}\n
You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode()
function as shown below:
module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n # ... truncated for brevity\neks_addons = {\ncoredns = {\nmost_recent = true\nconfiguration_values = jsonencode({\nreplicaCount = 4\nresources = {\nlimits = {\ncpu = \"100m\"\nmemory = \"150Mi\"\n}\nrequests = {\ncpu = \"100m\"\nmemory = \"150Mi\"\n}\n}\n})\n}\n}\n}\n
"},{"location":"architectures/","title":"Architectures","text":""},{"location":"architectures/#addons","title":"Addons","text":"Addon x86_64/amd64 arm64 Argo Rollouts \u2705 \u2705 Argo Workflows \u2705 \u2705 Argo CD \u2705 \u2705 AWS CloudWatch Metrics \u2705 \u2705 AWS EFS CSI Driver \u2705 \u2705 AWS for FluentBit \u2705 \u2705 AWS FSx CSI Driver \u2705 \u2705 AWS Load Balancer Controller \u2705 \u2705 AWS Node Termination Handler \u2705 \u2705 AWS Private CA Issuer \u2705 \u2705 Cert Manager \u2705 \u2705 Cluster Autoscaler \u2705 \u2705 Cluster Proportional Autoscaler \u2705 \u2705 External DNS \u2705 \u2705 External Secrets \u2705 \u2705 OPA Gatekeeper \u2705 \u2705 Ingress Nginx \u2705 \u2705 Karpenter \u2705 \u2705 Kube-Prometheus Stack \u2705 \u2705 Metrics Server \u2705 \u2705 Secrets Store CSI Driver \u2705 \u2705 Secrets Store CSI Driver Provider AWS \u2705 \u2705 Velero \u2705 \u2705 Vertical Pod Autoscaler \u2705 \u2705"},{"location":"architectures/#amazon-eks-addons","title":"Amazon EKS Addons","text":"The Amazon EKS provided add-ons listed below support both x86_64/amd64
and arm64
architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64
and arm64
architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These addons are specified via the eks_addons
input variable.
The following addons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information on the addon, its supported configuration values, as well as questions, comments, and feature requests.
Addon Description Ondat Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. Hashicorp - Consul Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. Hashicorp - Vault Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. Sysdig Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. Tetrate Istio Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms. NetApp ONTAP Astra Trident NetApp's Astra Trident provides dynamic storage orchestration for FSx for NetApp ONTAP using a Container Storage Interface (CSI) compliant driver."},{"location":"helm-release/","title":"Helm Release Add-ons","text":"Starting with EKS Blueprints v5 we have made a decision to only support the provisioning of a certain core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically you can expect that any AWS-created add-on that is not yet available via the Amazon EKS add-ons will be prioritized to be provisioned through this repository.
In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g. metrics-server) and certain patterns (gitops) that are foundational elements for a complete blueprint of an EKS cluster.
One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they may want to provision add-ons that are not necessarily supported by EKS Blueprints. For such add-ons, the options are as follows:
"},{"location":"helm-release/#with-helm_release-terraform-resource","title":"Withhelm_release
Terraform Resource","text":"The helm_release resource is the most fundamental way to provision a helm chart via Terraform.
Use this resource if you need to control the lifecycle of add-ons down to the level of each add-on resource.
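For example, a minimal sketch of provisioning a single chart directly with the helm_release resource might look like the following; the chart, repository, and version shown here simply reuse the metrics-server values from this documentation for illustration.
resource \"helm_release\" \"metrics_server\" {\nname = \"metrics-server\"\nrepository = \"https://kubernetes-sigs.github.io/metrics-server/\"\nchart = \"metrics-server\"\nversion = \"3.10.0\"\nnamespace = \"kube-system\"\ncreate_namespace = false # kube-system already exists\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Because the release is a standalone resource, you can plan, target, or destroy it independently of any other add-on.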
"},{"location":"helm-release/#with-helm_releases-variable","title":"Withhelm_releases
Variable","text":"You can use the helm_releases
variable in EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we just iterate through the provided map and pass each configuration to the Terraform helm_release resource.
E.g.
module \"addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\nversion = \"~> 1.0\"\ncluster_name = \"<cluster_name>\"\ncluster_endpoint = \"<cluster_endpoint>\"\ncluster_version = \"<cluster_version>\"\noidc_provider_arn = \"<oidc_provider_arn>\"\n # EKS add-ons\neks_addons = {\ncoredns = {}\nvpc-cni = {}\nkube-proxy = {}\n}\n # Blueprints add-ons\nenable_aws_efs_csi_driver = true\nenable_aws_cloudwatch_metrics = true\nenable_cert_manager = true\n...\n # Pass in any number of Helm charts to be created for those that are not natively supported\nhelm_releases = {\nprometheus-adapter = {\ndescription = \"A Helm chart for k8s prometheus adapter\"\nnamespace = \"prometheus-adapter\"\ncreate_namespace = true\nchart = \"prometheus-adapter\"\nchart_version = \"4.2.0\"\nrepository = \"https://prometheus-community.github.io/helm-charts\"\nvalues = [\n<<-EOT\n replicas: 2\n podDisruptionBudget:\n enabled: true\n EOT\n]\n}\ngpu-operator = {\ndescription = \"A Helm chart for NVIDIA GPU operator\"\nnamespace = \"gpu-operator\"\ncreate_namespace = true\nchart = \"gpu-operator\"\nchart_version = \"v23.3.2\"\nrepository = \"https://nvidia.github.io/gpu-operator\"\nvalues = [\n<<-EOT\n operator:\n defaultRuntime: containerd\n EOT\n]\n}\n}\ntags = local.tags\n}\n
With this pattern, the lifecycle of all your add-ons is tied to that of the addons
module. This allows you to easily target the addon module in your Terraform apply and destroy commands. E.g.
terraform apply -target=module.addons\n\nterraform destroy -target=module.addons\n
"},{"location":"helm-release/#with-eks-blueprints-addon-module","title":"With EKS Blueprints Addon Module","text":"If you have an add-on that requires an IAM Role for Service Account (IRSA), we have created a new Terraform module terraform-aws-eks-blueprints-addon that can help provision a Helm chart along with an IAM role and policies with permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.
You can optionally use this module for add-ons that do not need IRSA or even just to create the IAM resources for IRSA and skip the helm release. Detailed usage of how to consume this module can be found in its readme.
This pattern can be used to create a Terraform module with a set of add-ons that are not supported in the EKS Blueprints Add-ons today and wrap them in the same module definition. An example of this is the ACK add-ons repository which is a collection of ACK helm chart deployments with IRSA for each of the ACK controllers.
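As a rough sketch, wrapping a chart together with an IRSA role in the terraform-aws-eks-blueprints-addon module could look like the following; the chart values are placeholders and the IRSA-related input names (create_role, role_name, role_policies, oidc_providers) are assumptions to be verified against that module's readme rather than something documented on this page.
module \"custom_addon\" {\nsource = \"aws-ia/eks-blueprints-addon/aws\"\nversion = \"~> 1.0\"\nchart = \"<CHART_NAME>\"\nchart_version = \"<CHART_VERSION>\"\nrepository = \"<CHART_REPOSITORY_URL>\"\nnamespace = \"<NAMESPACE>\"\n # The IRSA input names below are assumed - verify against the module readme\ncreate_role = true\nrole_name = \"<ROLE_NAME>\"\nrole_policies = {\nadditional = \"<IAM_POLICY_ARN>\"\n}\noidc_providers = {\nthis = {\nprovider_arn = \"<OIDC_PROVIDER_ARN>\"\nservice_account = \"<SERVICE_ACCOUNT_NAME>\"\n}\n}\n}\n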
"},{"location":"addons/argo-events/","title":"Argo Events","text":"Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).
"},{"location":"addons/argo-events/#usage","title":"Usage","text":"Argo Events can be deployed by enabling the add-on via the following.
enable_argo_events = true\n
You can optionally customize the Helm chart that deploys Argo Events via the following configuration.
enable_argo_events = true\nargo_events = {\nname = \"argo-events\"\nchart_version = \"2.4.0\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-events\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-events pods are running.
$ kubectl get pods -n argo-events\nNAME READY STATUS RESTARTS AGE\nargo-events-controller-manager-bfb894cdb-k8hzn 1/1 Running 0 11m\n
"},{"location":"addons/argo-rollouts/","title":"Argo Rollouts","text":"Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.
"},{"location":"addons/argo-rollouts/#usage","title":"Usage","text":"Argo Rollouts can be deployed by enabling the add-on via the following.
enable_argo_rollouts = true\n
You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.
enable_argo_rollouts = true\nargo_rollouts = {\nname = \"argo-rollouts\"\nchart_version = \"2.22.3\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-rollouts\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-rollouts pods are running.
$ kubectl get pods -n argo-rollouts\nNAME READY STATUS RESTARTS AGE\nargo-rollouts-5db5688849-x89zb 0/1 Running 0 11s\n
"},{"location":"addons/argo-workflows/","title":"Argo Workflows","text":"Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
"},{"location":"addons/argo-workflows/#usage","title":"Usage","text":"Argo Workflows can be deployed by enabling the add-on via the following.
enable_argo_workflows = true\n
You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.
enable_argo_workflows = true\nargo_workflows = {\nname = \"argo-workflows\"\nchart_version = \"0.28.2\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argo-workflows\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argo-workflows pods are running.
$ kubectl get pods -n argo-workflows\nNAME READY STATUS RESTARTS AGE\nargo-workflows-server-68988cd864-22zhr 1/1 Running 0 6m32s\nargo-workflows-workflow-controller-7ff7b5658d-9q44f 1/1 Running 0 6m32s\n
"},{"location":"addons/argocd/","title":"Argo CD","text":"Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
"},{"location":"addons/argocd/#usage","title":"Usage","text":"Argo CD can be deployed by enabling the add-on via the following.
enable_argocd = true\n
You can optionally customize the Helm chart that deploys Argo CD via the following configuration.
enable_argocd = true\nargocd = {\nname = \"argocd\"\nchart_version = \"5.29.1\"\nrepository = \"https://argoproj.github.io/argo-helm\"\nnamespace = \"argocd\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify argocd pods are running.
$ kubectl get pods -n argocd\nNAME READY STATUS RESTARTS AGE\nargo-cd-argocd-application-controller-0 1/1 Running 0 146m\nargo-cd-argocd-applicationset-controller-678d85f77b-rmpcb 1/1 Running 0 146m\nargo-cd-argocd-dex-server-7b6c9b5969-zpqnl 1/1 Running 0 146m\nargo-cd-argocd-notifications-controller-6d489b99c9-j6fdw 1/1 Running 0 146m\nargo-cd-argocd-redis-59dd95f5b5-8fx74 1/1 Running 0 146m\nargo-cd-argocd-repo-server-7b9bd88c95-mh2fz 1/1 Running 0 146m\nargo-cd-argocd-server-6f9cfdd4d5-8mfpc 1/1 Running 0 146m\n
"},{"location":"addons/aws-cloudwatch-metrics/","title":"AWS CloudWatch Metrics","text":"Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.
Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.
"},{"location":"addons/aws-cloudwatch-metrics/#usage","title":"Usage","text":"aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.
enable_aws_cloudwatch_metrics = true\n
You can also customize the Helm chart that deploys aws-cloudwatch-metrics
via the following configuration:
enable_aws_cloudwatch_metrics = true\naws_cloudwatch_metrics_irsa_policies = [\"IAM Policies\"]\naws_cloudwatch_metrics = {\nrole_policies = [\"IAM Policies\"] # extra policies in addition of CloudWatchAgentServerPolicy\nname = \"aws-cloudwatch-metrics\"\nrepository = \"https://aws.github.io/eks-charts\"\nchart_version = \"0.0.9\"\nnamespace = \"amazon-cloudwatch\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here\n}\n
Verify aws-cloudwatch-metrics pods are running.
$ kubectl get pods -n amazon-cloudwatch\n\nNAME READY STATUS RESTARTS AGE\naws-cloudwatch-metrics-2dt5h 1/1 Running 0 149m\n
"},{"location":"addons/aws-efs-csi-driver/","title":"AWS EFS CSI Driver","text":"This add-on deploys the AWS EFS CSI driver into an EKS cluster.
"},{"location":"addons/aws-efs-csi-driver/#usage","title":"Usage","text":"The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.
enable_aws_efs_csi_driver = true\n
You can optionally customize the Helm chart that deploys the driver via the following configuration.
enable_aws_efs_csi_driver = true\n # Optional aws_efs_csi_driver configuration\naws_efs_csi_driver = {\nrepository = \"https://kubernetes-sigs.github.io/aws-efs-csi-driver/\"\nchart_version = \"2.4.1\"\nrole_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n}\n
Once deployed, you will be able to see a number of supporting resources in the kube-system
namespace.
$ kubectl get deployment efs-csi-controller -n kube-system\n\nNAME READY UP-TO-DATE AVAILABLE AGE\nefs-csi-controller 2/2 2 2 4m29s\n
$ kubectl get daemonset efs-csi-node -n kube-system\n\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\nefs-csi-node 3 3 3 3 3 beta.kubernetes.io/os=linux 4m32s\n
"},{"location":"addons/aws-efs-csi-driver/#validate-efs-csi-driver","title":"Validate EFS CSI Driver","text":"Follow the static provisioning example described here to validate the CSI driver is working as expected.
"},{"location":"addons/aws-for-fluentbit/","title":"AWS for Fluent Bit","text":"AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.
"},{"location":"addons/aws-for-fluentbit/#usage","title":"Usage","text":"AWS for Fluent Bit can be deployed by enabling the add-on via the following.
enable_aws_for_fluentbit = true\n
You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.
enable_aws_for_fluentbit = true\naws_for_fluentbit_cw_log_group = {\ncreate = true\nuse_name_prefix = true # Set this to true to enable name prefix\nname_prefix = \"eks-cluster-logs-\"\nretention = 7\n}\naws_for_fluentbit = {\nname = \"aws-for-fluent-bit\"\nchart_version = \"0.1.24\"\nrepository = \"https://aws.github.io/eks-charts\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
"},{"location":"addons/aws-for-fluentbit/#verify-the-fluent-bit-setup","title":"Verify the Fluent Bit setup","text":"Verify aws-for-fluentbit pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\naws-for-fluent-bit-6kp66 1/1 Running 0 172m\n
Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/
In the navigation pane, choose Log groups.
Make sure that you're in the Region where you deployed Fluent Bit.
Check the list of log groups in the Region. You should see the following:
/aws/containerinsights/Cluster_Name/application\n\n/aws/containerinsights/Cluster_Name/host\n\n/aws/containerinsights/Cluster_Name/dataplane\n
Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.
There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.
"},{"location":"addons/aws-fsx-csi-driver/","title":"AWS FSx CSI Driver","text":"This add-on deploys the Amazon FSx CSI Driver in to an Amazon EKS Cluster.
"},{"location":"addons/aws-fsx-csi-driver/#usage","title":"Usage","text":"The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.
enable_aws_fsx_csi_driver = true\n
"},{"location":"addons/aws-fsx-csi-driver/#helm-chart-customization","title":"Helm Chart customization","text":"You can optionally customize the Helm chart deployment using a configuration like the following.
enable_aws_fsx_csi_driver = true\naws_fsx_csi_driver = {\nnamespace = \"aws-fsx-csi-driver\"\nchart_version = \"1.6.0\"\nrole_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n}\n
You can find all available Helm Chart parameter values here
"},{"location":"addons/aws-fsx-csi-driver/#validation","title":"Validation","text":"Once deployed, you will be able to see a number of supporting resources in the kube-system
namespace.
$ kubectl -n kube-system get deployment fsx-csi-controller\n\nNAME READY UP-TO-DATE AVAILABLE AGE\nfsx-csi-controller 2/2 2 2 4m29s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-controller\nNAME READY STATUS RESTARTS AGE\nfsx-csi-controller-56c6d9bbb8-89cpc 4/4 Running 0 3m30s\nfsx-csi-controller-56c6d9bbb8-9wnlh 4/4 Running 0 3m30s\n
$ kubectl -n kube-system get daemonset fsx-csi-node\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\nfsx-csi-node 3 3 3 3 3 kubernetes.io/os=linux 5m27s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-node\nNAME READY STATUS RESTARTS AGE\nfsx-csi-node-7c5z6 3/3 Running 0 5m29s\nfsx-csi-node-d5q28 3/3 Running 0 5m29s\nfsx-csi-node-hlg8q 3/3 Running 0 5m29s\n
Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.
$ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n name: fsx-sc\nprovisioner: fsx.csi.aws.com\nparameters:\n subnetId: <YOUR_SUBNET_IDs>\n securityGroupIds: <YOUR_SG_ID>\n perUnitStorageThroughput: \"200\"\n deploymentType: PERSISTENT_1\nmountOptions:\n - flock\nEOF\n
$ kubectl describe storageclass fsx-sc\nName: fsx-sc\nIsDefaultClass: No\nAnnotations: kubectl.kubernetes.io/last-applied-configuration={\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{},\"name\":\"fsx-sc\"},\"mountOptions\":null,\"parameters\":{\"deploymentType\":\"PERSISTENT_1\",\"perUnitStorageThroughput\":\"200\",\"securityGroupIds\":\"sg-q1w2e3r4t5y6u7i8o\",\"subnetId\":\"subnet-q1w2e3r4t5y6u7i8o\"},\"provisioner\":\"fsx.csi.aws.com\"}\nProvisioner: fsx.csi.aws.com\nParameters: deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o\nAllowVolumeExpansion: <unset>\nMountOptions: <none>\nReclaimPolicy: Delete\nVolumeBindingMode: Immediate\nEvents: <none>\n
Create a PVC.
$ cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: fsx-claim\nspec:\n accessModes:\n - ReadWriteMany\n storageClassName: fsx-sc\n resources:\n requests:\n storage: 1200Gi\nEOF\n
Wait for the PV to be created and bound to your PVC.
$ kubectl get pvc\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nfsx-claim Bound pvc-df385730-72d6-4b0c-8275-cc055a438760 1200Gi RWX fsx-sc 7m47s\n$ kubectl get pv\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE\npvc-df385730-72d6-4b0c-8275-cc055a438760 1200Gi RWX Delete Bound default/fsx-claim fsx-sc 2m13s\n
"},{"location":"addons/aws-gateway-api-controller/","title":"AWS Gateway API Controller","text":"AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.
"},{"location":"addons/aws-gateway-api-controller/#usage","title":"Usage","text":"AWS Gateway API Controller can be deployed by enabling the add-on via the following.
enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nset = [{\nname = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n
You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.
enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nname = \"aws-gateway-api-controller\"\nchart_version = \"v0.0.12\"\nrepository = \"oci://public.ecr.aws/aws-application-networking-k8s\"\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nnamespace = \"aws-application-networking-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\nset = [{\nname = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n
Verify aws-gateway-api-controller pods are running.
$ kubectl get pods -n aws-application-networking-system\nNAME READY STATUS RESTARTS AGE\naws-gateway-api-controller-aws-gateway-controller-chart-8f42q426 1/1 Running 0 40s\naws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g 1/1 Running 0 71s\n
Deploy example GatewayClass
$ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml\ngatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created\n
Describe GatewayClass
$ kubectl describe gatewayclass\nName: amazon-vpc-lattice\nNamespace:\nLabels: <none>\nAnnotations: <none>\nAPI Version: gateway.networking.k8s.io/v1beta1\nKind: GatewayClass\nMetadata:\n Creation Timestamp: 2023-06-22T22:33:32Z\n Generation: 1\nResource Version: 819021\nUID: aac59195-8f37-4c23-a2a5-b0f363deda77\nSpec:\n Controller Name: application-networking.k8s.aws/gateway-api-controller\nStatus:\n Conditions:\n Last Transition Time: 2023-06-22T22:33:32Z\n Message: Accepted\n Observed Generation: 1\nReason: Accepted\n Status: True\n Type: Accepted\nEvents: <none>\n
"},{"location":"addons/aws-load-balancer-controller/","title":"AWS Load Balancer Controller.","text":"AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.
"},{"location":"addons/aws-load-balancer-controller/#usage","title":"Usage","text":"In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_aws_load_balancer_controller = true\naws_load_balancer_controller = {\nset = [\n{\nname = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n]\n}\n
"},{"location":"addons/aws-load-balancer-controller/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller
configuration block:
aws_load_balancer_controller = {\nset = [\n{\nname = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n{\nname = \"resources.requests.cpu\"\nvalue = \"100m\"\n},\n{\nname = \"resources.requests.memory\"\nvalue = \"128Mi\"\n},\n]\n}\n
You can find all available Helm Chart parameter values here.
"},{"location":"addons/aws-load-balancer-controller/#validate","title":"Validate","text":"aws-load-balancer-controller
Pods were created in the kube-system
Namespace, as in the following example.kubectl -n kube-system get pods | grep aws-load-balancer-controller\nNAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system aws-load-balancer-controller-6cbdb58654-fvskt 1/1 Running 0 26m\nkube-system aws-load-balancer-controller-6cbdb58654-sc7dk 1/1 Running 0 26m\n
Create a Kubernetes Ingress using the alb
IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc
.kubectl create ingress example-ingress --class alb --rule=\"/*=example-svc:80\" \\\n--annotation alb.ingress.kubernetes.io/scheme=internet-facing \\\n--annotation alb.ingress.kubernetes.io/target-type=ip\n
kubectl get ingress\nNAME CLASS HOSTS ADDRESS PORTS AGE\nexample-ingress alb * k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com 80 4m9s\n
"},{"location":"addons/aws-load-balancer-controller/#resources","title":"Resources","text":"GitHub Repo Helm Chart AWS Docs
"},{"location":"addons/aws-node-termination-handler/","title":"AWS Node Termination Handler","text":"This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.
"},{"location":"addons/aws-node-termination-handler/#usage","title":"Usage","text":"AWS Node Termination Handler can be deployed by enabling the add-on via the following.
enable_aws_node_termination_handler = true\n
You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.
enable_aws_node_termination_handler = true\naws_node_termination_handler = {\nname = \"aws-node-termination-handler\"\nchart_version = \"0.21.0\"\nrepository = \"https://aws.github.io/eks-charts\"\nnamespace = \"aws-node-termination-handler\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify aws-node-termination-handler pods are running.
$ kubectl get pods -n aws-node-termination-handler\nNAME READY STATUS RESTARTS AGE\naws-node-termination-handler-6f598b6b89-6mqgk 1/1 Running 1 (22h ago) 26h\n
Verify SQS Queue is created.
$ aws sqs list-queues\n\n{\n\"QueueUrls\": [\n\"https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004\"\n]\n}\n
Verify Event Rules are created.
$ aws events list-rules\n{\n[\n{\n\"Name\": \"NTH-ASGTerminiate-20230602191740664900000025\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"Auto scaling instance terminate event\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-HealthEvent-20230602191740079300000022\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"AWS health event\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-InstanceRebalance-20230602191740077100000021\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 instance rebalance recommendation\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-InstanceStateChange-20230602191740165000000024\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 instance state-change notification\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTH-SpotInterrupt-20230602191740077100000020\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"Description\": \"EC2 spot instance interruption warning\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHASGTermRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHInstanceStateChangeRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHRebalanceRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHScheduledChangeRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n},\n {\n\"Name\": \"NTHSpotTermRule\",\n \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule\",\n \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance 
Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n \"State\": \"ENABLED\",\n \"EventBusName\": \"default\"\n}\n]\n}\n
"},{"location":"addons/aws-private-ca-issuer/","title":"AWS Private CA Issuer","text":"AWS Private CA is an AWS service that can setup and manage private CAs, as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager that signs off certificate requests using AWS Private CA in an Amazon EKS Cluster.
"},{"location":"addons/aws-private-ca-issuer/#usage","title":"Usage","text":""},{"location":"addons/aws-private-ca-issuer/#pre-requisites","title":"Pre-requisites","text":"To deploy the AWS PCA, you need to install cert-manager first, refer to this documentation to do it through EKS Blueprints Addons.
"},{"location":"addons/aws-private-ca-issuer/#deployment","title":"Deployment","text":"With cert-manager deployed in place, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_cert_manager = true\nenable_aws_privateca_issuer = true\naws_privateca_issuer = {\nacmca_arn = aws_acmpca_certificate_authority.this.arn\n}\n}\n
"},{"location":"addons/aws-private-ca-issuer/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller
configuration block:
aws_privateca_issuer = {\nacmca_arn = aws_acmpca_certificate_authority.this.arn\nnamespace = \"aws-privateca-issuer\"\ncreate_namespace = true\n}\n
You can find all available Helm Chart parameter values here.
"},{"location":"addons/aws-private-ca-issuer/#validation","title":"Validation","text":"aws-privateca-issuer
and cert-manager
Namespaces.kubectl get pods -n aws-privateca-issuer\nkubectl get pods -n cert-manager\n
The certificate status should be in the Ready state and point to a secret
created in the same Namespace.kubectl get certificate -o wide\nNAME READY SECRET ISSUER STATUS AGE\nexample True example-clusterissuer tls-with-aws-pca-issuer Certificate is up to date and has not expired 41m\n\nkubectl get secret example-clusterissuer\nNAME TYPE DATA AGE\nexample-clusterissuer kubernetes.io/tls 3 43m\n
"},{"location":"addons/aws-private-ca-issuer/#resources","title":"Resources","text":"GitHub Repo Helm Chart AWS Docs
"},{"location":"addons/cert-manager/","title":"Cert-Manager","text":"Cert-manager is a X.509 certificate controller for Kubernetes-like workloads. It will obtain certificates from a variety of Issuers, both popular public Issuers as well as private Issuers, and ensure the certificates are valid and up-to-date, and will attempt to renew certificates at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.
"},{"location":"addons/cert-manager/#usage","title":"Usage","text":"To deploy cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_cert_manager = true\n}\n
"},{"location":"addons/cert-manager/#helm-chart-customization","title":"Helm Chart customization","text":"It's possible to customize your deployment using the Helm Chart parameters inside the cert-manager
configuration block:
cert_manager = {\nchart_version = \"v1.11.1\"\nnamespace = \"cert-manager\"\ncreate_namespace = true\n}\n
You can find all available Helm Chart parameter values here
"},{"location":"addons/cert-manager/#validation","title":"Validation","text":"kubectl -n cert-manager get pods\nNAME READY STATUS RESTARTS AGE\ncert-manager-5989bcc87-96qvf 1/1 Running 0 2m49s\ncert-manager-cainjector-9b44ddb68-8c7b9 1/1 Running 0 2m49s\ncert-manager-webhook-776b65456-k6br4 1/1 Running 0 2m49s\n
apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\nname: selfsigned-cluster-issuer\nspec:\nselfSigned: {}\n
kubectl get clusterissuers -o wide selfsigned-cluster-issuer\nNAME READY STATUS AGE\nselfsigned-cluster-issuer True 3m\n
apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: example\nnamespace: default\nspec:\nisCA: true\ncommonName: example\nsecretName: example-secret\nprivateKey:\nalgorithm: ECDSA\nsize: 256\nissuerRef:\nname: selfsigned-cluster-issuer\nkind: ClusterIssuer\ngroup: cert-manager.io\n
The certificate status should be in the Ready state and point to a secret
created in the same Namespace.kubectl get certificate -o wide\nNAME READY SECRET ISSUER STATUS AGE\nexample True example-secret selfsigned-cluster-issuer Certificate is up to date and has not expired 44s\n\nkubectl get secret example-secret\nNAME TYPE DATA AGE\nexample-secret kubernetes.io/tls 3 70s\n
"},{"location":"addons/cert-manager/#resources","title":"Resources","text":"GitHub Repo Helm Chart
"},{"location":"addons/cluster-autoscaler/","title":"Cluster Autoscaler","text":"The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.
"},{"location":"addons/cluster-autoscaler/#usage","title":"Usage","text":"Cluster Autoscaler can be deployed by enabling the add-on via the following.
enable_cluster_autoscaler = true\n
You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.
enable_cluster_autoscaler = true\ncluster_autoscaler = {\nname = \"cluster-autoscaler\"\nchart_version = \"9.29.0\"\nrepository = \"https://kubernetes.github.io/autoscaler\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify cluster-autoscaler pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\ncluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9 1/1 Running 1 (2d ago) 2d5h\n
"},{"location":"addons/cluster-proportional-autoscaler/","title":"Cluster Proportional Autoscaler","text":"Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.
The cluster-proportional-autoscaler helps scale applications that use a Deployment, ReplicationController, or ReplicaSet. It is an alternative to Horizontal Pod Autoscaling and is typically installed as a Deployment in your cluster.
Refer to the eks-best-practices-guides for additional configuration guidance.
"},{"location":"addons/cluster-proportional-autoscaler/#usage","title":"Usage","text":"This add-on requires both enable_cluster_proportional_autoscaler
and cluster_proportional_autoscaler
as mandatory fields.
The example shows how to enable cluster-proportional-autoscaler
for CoreDNS Deployment
. The CoreDNS Deployment is not configured with an HPA, so this add-on helps scale the CoreDNS Add-on according to the number of nodes and cores in the cluster.
This Add-on can be used to scale any application with Deployment objects.
enable_cluster_proportional_autoscaler = true\ncluster_proportional_autoscaler = {\nvalues = [\n<<-EOT\n nameOverride: kube-dns-autoscaler\n # Formula for controlling the replicas. Adjust according to your needs\n # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )\n config:\n linear:\n coresPerReplica: 256\n nodesPerReplica: 16\n min: 1\n max: 100\n preventSinglePointFailure: true\n includeUnschedulableNodes: true\n # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).\n options:\n target: deployment/coredns # Notice the target as `deployment/coredns`\n serviceAccount:\n create: true\n name: kube-dns-autoscaler\n podSecurityContext:\n seccompProfile:\n type: RuntimeDefault\n supplementalGroups: [65534]\n fsGroup: 65534\n resources:\n limits:\n cpu: 100m\n memory: 128Mi\n requests:\n cpu: 100m\n memory: 128Mi\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n description: \"Cluster Proportional Autoscaler for CoreDNS Service\"\n EOT\n]\n}\n
"},{"location":"addons/cluster-proportional-autoscaler/#expected-result","title":"Expected result","text":"The cluster-proportional-autoscaler
pod running in the kube-system
namespace.
kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler\nNAME READY STATUS RESTARTS AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7 1/1 Running 0 21h\n
The cluster-proportional-autoscaler-kube-dns-autoscaler
config map exists. kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler\nNAME DATA AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler 1 21h\n
"},{"location":"addons/cluster-proportional-autoscaler/#testing","title":"Testing","text":"To test that coredns
pods scale, first take a baseline of how many nodes the cluster has and how many coredns
pods are running.
kubectl get nodes\nNAME STATUS ROLES AGE VERSION\nip-10-0-19-243.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME READY STATUS RESTARTS AGE\ncoredns-7975d6fb9b-dlkdd 1/1 Running 0 21h\ncoredns-7975d6fb9b-xqqwp 1/1 Running 0 21h\n
Change the following parameters in the HCL code above so a scaling event can be easily triggered:
config:\nlinear:\ncoresPerReplica: 4\nnodesPerReplica: 2\nmin: 1\nmax: 4\n
and execute terraform apply
. Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.
Check that the new node came up and coredns
scaled up.
NAME STATUS ROLES AGE VERSION\nip-10-0-14-120.ec2.internal Ready <none> 10m v1.26.4-eks-0a21954\nip-10-0-19-243.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal Ready <none> 21h v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME READY STATUS RESTARTS AGE\ncoredns-7975d6fb9b-dlkdd 1/1 Running 0 21h\ncoredns-7975d6fb9b-ww64t 1/1 Running 0 10m\ncoredns-7975d6fb9b-xqqwp 1/1 Running 0 21h\n
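As a sanity check, the replica count follows the documented formula replicas = max( ceil( cores * 1/coresPerReplica ), ceil( nodes * 1/nodesPerReplica ) ). Assuming, for illustration, that each of the 5 nodes has 2 cores (10 cores total) and the test values coresPerReplica = 4 and nodesPerReplica = 2 are in effect, this yields max( ceil(10/4), ceil(5/2) ) = max(3, 3) = 3 replicas, which matches the three coredns pods shown above.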
"},{"location":"addons/external-dns/","title":"External DNS","text":"ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly\u2014e.g. AWS Route 53.
"},{"location":"addons/external-dns/#usage","title":"Usage","text":"External DNS can be deployed by enabling the add-on via the following.
enable_external_dns = true\n
You can optionally customize the Helm chart that deploys External DNS via the following configuration.
enable_external_dns = true\nexternal_dns = {\nname = \"external-dns\"\nchart_version = \"1.12.2\"\nrepository = \"https://kubernetes-sigs.github.io/external-dns/\"\nnamespace = \"external-dns\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\nexternal_dns_route53_zone_arns = [\"XXXXXXXXXXXXXXXXXXXXXXX\"]\n
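The external_dns_route53_zone_arns value above is a placeholder for the ARN(s) of the hosted zone(s) ExternalDNS is allowed to manage. If the zone is managed in the same Terraform configuration, one way to wire it in (a sketch assuming an aws_route53_zone resource named this) is the following.
external_dns_route53_zone_arns = [aws_route53_zone.this.arn] # assumes the zone is managed in this configuration\n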
Verify external-dns pods are running.
$ kubectl get pods -n external-dns\nNAME READY STATUS RESTARTS AGE\nexternal-dns-849b89c675-ffnf6 1/1 Running 1 (2d ago) 2d5h\n
To further configure external-dns, refer to the examples:
"},{"location":"addons/external-secrets/","title":"External Secrets","text":"External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.
"},{"location":"addons/external-secrets/#usage","title":"Usage","text":"External Secrets can be deployed by enabling the add-on via the following.
enable_external_secrets = true\n
You can optionally customize the Helm chart that deploys External Secrets via the following configuration.
enable_external_secrets = true\nexternal_secrets = {\nname = \"external-secrets\"\nchart_version = \"0.8.1\"\nrepository = \"https://charts.external-secrets.io\"\nnamespace = \"external-secrets\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify external-secrets pods are running.
$ kubectl get pods -n external-secrets\nNAME READY STATUS RESTARTS AGE\nexternal-secrets-67bfd5b47c-xc5xf 1/1 Running 1 (2d1h ago) 2d6h\nexternal-secrets-cert-controller-8f75c6f79-qcfx4 1/1 Running 1 (2d1h ago) 2d6h\nexternal-secrets-webhook-78f6bd456-76wmm 1/1 Running 1 (2d1h ago) 2d6h\n
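Once the operator is running, secrets are consumed through a (Cluster)SecretStore and ExternalSecret objects. The following is a minimal sketch of a ClusterSecretStore backed by AWS Secrets Manager using the Terraform kubernetes_manifest resource; the region, service account name, and namespace are illustrative assumptions, and kubernetes_manifest requires the External Secrets CRDs to already exist at plan time.
resource \"kubernetes_manifest\" \"cluster_secret_store\" {\nmanifest = {\napiVersion = \"external-secrets.io/v1beta1\"\nkind = \"ClusterSecretStore\"\nmetadata = {\nname = \"aws-secrets-manager\"\n}\nspec = {\nprovider = {\naws = {\nservice = \"SecretsManager\"\nregion = \"us-west-2\" # assumption\nauth = {\njwt = {\nserviceAccountRef = {\nname = \"external-secrets-sa\" # assumption: an IRSA-enabled service account\nnamespace = \"external-secrets\"\n}\n}\n}\n}\n}\n}\n}\n}\n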
"},{"location":"addons/fargate-fluentbit/","title":"Fargate FluentBit","text":"Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar, but Amazon runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated ConfigMap, that is deployed via this Add-on.
"},{"location":"addons/fargate-fluentbit/#usage","title":"Usage","text":"To configure the Fargate Fluentbit ConfigMap via the EKS Blueprints Addons, just reference the following parameters under the module.eks_blueprints_addons
.
module \"eks_blueprints_addons\" {\nenable_fargate_fluentbit = true\nfargate_fluentbit = {\nflb_log_cw = true\n}\n}\n
It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group
configuration block:
fargate_fluentbit_cw_log_group = {\nname = \"existing-log-group\"\nname_prefix = \"dev-environment-logs\"\nretention_in_days = 7\nkms_key_id = \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\nskip_destroy = true\n}\n
"},{"location":"addons/fargate-fluentbit/#validation","title":"Validation","text":"aws-logging
configMap for Fargate Fluentbit was created.kubectl -n aws-observability get configmap aws-logging -o yaml\napiVersion: v1\ndata:\n filters.conf: |\n[FILTER]\nName parser\n Match *\n Key_Name log\n Parser regex\n Preserve_Key True\n Reserve_Data True\n flb_log_cw: \"true\"\noutput.conf: |\n[OUTPUT]\nName cloudwatch_logs\n Match *\n region us-west-2\n log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\n log_stream_prefix fargate-logs-\n auto_create_group true\nparsers.conf: |\n[PARSER]\nName regex\n Format regex\n Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$\n Time_Key time\nTime_Format %Y-%m-%dT%H:%M:%S.%L%z\n Time_Keep On\n Decode_Field_As json message\nimmutable: false\nkind: ConfigMap\nmetadata:\n creationTimestamp: \"2023-05-08T21:14:52Z\"\nname: aws-logging\n namespace: aws-observability\n resourceVersion: \"1795\"\nuid: d822bcf5-a441-4996-857e-7fb1357bc07e\n
aws logs describe-log-groups --log-group-name-prefix \"/fargate-serverless/fargate-fluentbit\"\n{\n\"logGroups\": [\n{\n\"logGroupName\": \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\",\n \"creationTime\": 1683580491652,\n \"retentionInDays\": 90,\n \"metricFilterCount\": 0,\n \"arn\": \"arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*\",\n \"storedBytes\": 0\n}\n]\n}\n
aws logs describe-log-streams --log-group-name \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'\n[\n\"fargate-logs-flblogs.var.log.fluent-bit.log\",\n \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log\",\n \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log\",\n \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log\",\n \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log\"\n]\n
"},{"location":"addons/fargate-fluentbit/#resources","title":"Resources","text":"AWS Docs Fluent Bit for Amazon EKS on AWS Fargate Blog Post
"},{"location":"addons/ingress-nginx/","title":"Ingress Nginx","text":"This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.
Other than handling Kubernetes ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL Path (path based routing).
"},{"location":"addons/ingress-nginx/#usage","title":"Usage","text":"Ingress Nginx Controller can be deployed by enabling the add-on via the following.
enable_ingress_nginx = true\n
You can optionally customize the Helm chart that deploys ingress-nginx
via the following configuration.
enable_ingress_nginx = true\ningress_nginx = {\nname = \"ingress-nginx\"\nchart_version = \"4.6.1\"\nrepository = \"https://kubernetes.github.io/ingress-nginx\"\nnamespace = \"ingress-nginx\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify ingress-nginx pods are running.
$ kubectl get pods -n ingress-nginx\nNAME READY STATUS RESTARTS AGE\ningress-nginx-controller-f6c55fdc8-8bt2z 1/1 Running 0 44m\n
"},{"location":"addons/karpenter/","title":"Karpenter","text":""},{"location":"addons/karpenter/#prerequisites","title":"Prerequisites","text":"If deploying a node template that uses spot
, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:
aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true\n
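If IAM is managed with Terraform, the same service-linked role can be declared in your configuration instead of created with the CLI; note that creation fails if the role already exists in the account, in which case the resource should be omitted or imported.
resource \"aws_iam_service_linked_role\" \"spot\" {\naws_service_name = \"spot.amazonaws.com\"\n}\n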
"},{"location":"addons/karpenter/#validate","title":"Validate","text":"The following command will update the kubeconfig
on your local machine and allow you to interact with your EKS Cluster using kubectl
to validate the CoreDNS deployment for Fargate.
Run the update-kubeconfig
command:aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
kubectl get pods -n karpenter\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\nkarpenter-6f97df4f77-5nqsk 1/1 Running 0 3m28s\nkarpenter-6f97df4f77-n7fkf 1/1 Running 0 3m28s\n
kubectl get nodes\n\n# Output should look similar to below\nNAME STATUS ROLES AGE VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 2m56s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 2m57s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 2m34s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 2m33s v1.26.3-eks-f4dc2c0\n
Create a sample pause
deployment to demonstrate scaling:kubectl apply -f - <<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: inflate\nspec:\n replicas: 0\n selector:\n matchLabels:\n app: inflate\n template:\n metadata:\n labels:\n app: inflate\n spec:\n terminationGracePeriodSeconds: 0\n containers:\n - name: inflate\n image: public.ecr.aws/eks-distro/kubernetes/pause:3.7\n resources:\n requests:\n cpu: 1\nEOF\n
Scale up the sample pause
deployment to see Karpenter respond by provisioning nodes to support the workload:kubectl scale deployment inflate --replicas 5\n# To view logs\n# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller\n
kubectl get nodes\n\n# Output should look similar to below\nNAME STATUS ROLES AGE VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 5m15s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 5m16s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 4m53s v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 4m52s v1.26.3-eks-f4dc2c0\nip-10-0-1-184.us-west-2.compute.internal Ready <none> 26s v1.26.2-eks-a59e1f0 # <= new EC2 node launched\n
Delete the sample pause
deployment:kubectl delete deployment inflate\n
"},{"location":"addons/kube-prometheus-stack/","title":"Kube Prometheus Stack","text":"Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
"},{"location":"addons/kube-prometheus-stack/#usage","title":"Usage","text":"Kube Prometheus Stack can be deployed by enabling the add-on via the following.
enable_kube_prometheus_stack = true\n
You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.
enable_kube_prometheus_stack = true\nkube_prometheus_stack = {\nname = \"kube-prometheus-stack\"\nchart_version = \"45.10.1\"\nrepository = \"https://prometheus-community.github.io/helm-charts\"\nnamespace = \"kube-prometheus-stack\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify kube-prometheus-stack pods are running.
$ kubectl get pods -n kube-prometheus-stack\nNAME READY STATUS RESTARTS AGE\nalertmanager-kube-prometheus-stack-alertmanager-0 2/2 Running 3 (2d2h ago) 2d7h\nkube-prometheus-stack-grafana-5c6cf88fd9-8wc9k 3/3 Running 3 (2d2h ago) 2d7h\nkube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d 1/1 Running 1 (2d2h ago) 2d7h\nkube-prometheus-stack-operator-c74ddccb5-8cprr 1/1 Running 1 (2d2h ago) 2d7h\nkube-prometheus-stack-prometheus-node-exporter-vd8lw 1/1 Running 1 (2d2h ago) 2d7h\nprometheus-kube-prometheus-stack-prometheus-0 2/2 Running 2 (2d2h ago) 2d7h\n
"},{"location":"addons/metrics-server/","title":"Metrics Server","text":"Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.
Metrics Server collects resource metrics from Kubelets and exposes them in Kubernetes apiserver through Metrics API for use by Horizontal Pod Autoscaler and Vertical Pod Autoscaler. Metrics API can also be accessed by kubectl top, making it easier to debug autoscaling pipelines.
"},{"location":"addons/metrics-server/#usage","title":"Usage","text":"Metrics Server can be deployed by enabling the add-on via the following.
enable_metrics_server = true\n
You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.
enable_metrics_server = true\nmetrics_server = {\nname = \"metrics-server\"\nchart_version = \"3.10.0\"\nrepository = \"https://kubernetes-sigs.github.io/metrics-server/\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify metrics-server pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\nmetrics-server-6f9cdd486c-njh8b 1/1 Running 1 (2d2h ago) 2d7h\n
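Because Metrics Server feeds the resource metrics pipeline, workloads can then be scaled with a standard HorizontalPodAutoscaler. The following is a minimal sketch using the Terraform Kubernetes provider; the target Deployment my-app, its namespace, and the 60% CPU utilization target are illustrative assumptions.
resource \"kubernetes_horizontal_pod_autoscaler_v2\" \"example\" {\nmetadata {\nname = \"my-app\"\nnamespace = \"default\"\n}\nspec {\nmin_replicas = 1\nmax_replicas = 5\nscale_target_ref {\napi_version = \"apps/v1\"\nkind = \"Deployment\"\nname = \"my-app\"\n}\nmetric {\ntype = \"Resource\"\nresource {\nname = \"cpu\"\ntarget {\ntype = \"Utilization\"\naverage_utilization = 60\n}\n}\n}\n}\n}\n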
"},{"location":"addons/opa-gatekeeper/","title":"OPA Gatekeeper","text":"Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.
For complete project documentation, please visit the Gatekeeper documentation site. For reference templates, refer to the Templates.
"},{"location":"addons/opa-gatekeeper/#usage","title":"Usage","text":"Gatekeeper can be deployed by enabling the add-on via the following.
enable_gatekeeper = true\n
You can also customize the Helm chart that deploys gatekeeper
via the following configuration:
enable_gatekeeper = true\ngatekeeper = {\nname = \"gatekeeper\"\nchart_version = \"3.12.0\"\nrepository = \"https://open-policy-agent.github.io/gatekeeper/charts\"\nnamespace = \"gatekeeper-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
"},{"location":"addons/secrets-store-csi-driver-provider-aws/","title":"AWS Secrets Manager and Config Provider for Secret Store CSI Driver","text":"AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.
"},{"location":"addons/secrets-store-csi-driver-provider-aws/#usage","title":"Usage","text":"AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.
enable_secrets_store_csi_driver = true\nenable_secrets_store_csi_driver_provider_aws = true\n
You can optionally customize the Helm chart via the following configuration.
enable_secrets_store_csi_driver = true\nenable_secrets_store_csi_driver_provider_aws = true\nsecrets_store_csi_driver_provider_aws = {\nname = \"secrets-store-csi-driver\"\nchart_version = \"0.3.2\"\nrepository = \"https://aws.github.io/secrets-store-csi-driver-provider-aws\"\nnamespace = \"kube-system\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
Verify secrets-store-csi-driver pods are running.
$ kubectl get pods -n kube-system\nNAME READY STATUS RESTARTS AGE\nsecrets-store-csi-driver-9l2z8 3/3 Running 1 (2d5h ago) 2d9h\nsecrets-store-csi-driver-provider-aws-2qqkk 1/1 Running 1 (2d5h ago) 2d9h\n
"},{"location":"addons/velero/","title":"Velero","text":"Velero is an open source tool to safely backup and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.
Velero can be deployed by enabling the add-on via the following.
enable_velero = true\nvelero_backup_s3_bucket = \"<YOUR_BUCKET_NAME>\"\nvelero = {\ns3_backup_location = \"<YOUR_S3_BUCKET_ARN>[/prefix]\"\n}\n
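The bucket name and ARN above are placeholders. If the backup bucket is created in the same configuration, one way to wire it in is sketched below, assuming the terraform-aws-modules/s3-bucket module and a backups prefix.
module \"velero_backup_s3_bucket\" {\nsource = \"terraform-aws-modules/s3-bucket/aws\"\nbucket = \"<YOUR_BUCKET_NAME>\" # assumption: a globally unique bucket name\n}\n\n# Inside module \"eks_blueprints_addons\"\nenable_velero = true\nvelero_backup_s3_bucket = module.velero_backup_s3_bucket.s3_bucket_id\nvelero = {\ns3_backup_location = \"${module.velero_backup_s3_bucket.s3_bucket_arn}/backups\"\n}\n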
You can also customize the Helm chart that deploys velero
via the following configuration:
enable_velero = true\nvelero = {\nname = \"velero\"\ndescription = \"A Helm chart for velero\"\nchart_version = \"3.1.6\"\nrepository = \"https://vmware-tanzu.github.io/helm-charts/\"\nnamespace = \"velero\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
To see a working example, see the stateful
example blueprint.
Run the update-kubeconfig
command:aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
kubectl get all -n velero\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\npod/velero-7b8994d56-z89sl 1/1 Running 0 25h\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/velero ClusterIP 172.20.20.118 <none> 8085/TCP 25h\n\nNAME READY UP-TO-DATE AVAILABLE AGE\ndeployment.apps/velero 1/1 1 1 25h\n\nNAME DESIRED CURRENT READY AGE\nreplicaset.apps/velero-7b8994d56 1 1 1 25h\n
velero backup-location get\n\n# Output should look similar to below\nNAME PROVIDER BUCKET/PREFIX PHASE LAST VALIDATED ACCESS MODE DEFAULT\ndefault aws stateful-20230503175301619800000005/backups Available 2023-05-04 15:15:00 -0400 EDT ReadWrite true\n
kubectl create namespace backupdemo\nkubectl run nginx --image=nginx -n backupdemo\n
velero backup create backup1 --include-namespaces backupdemo\n\n# Output should look similar to below\nBackup request \"backup1\" submitted successfully.\nRun `velero backup describe backup1` or `velero backup logs backup1` for more details.\n
velero backup describe backup1\n\n# Output should look similar to below\nName: backup1\nNamespace: velero\nLabels: velero.io/storage-location=default\nAnnotations: velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0\n velero.io/source-cluster-k8s-major-version=1\nvelero.io/source-cluster-k8s-minor-version=26+\n\nPhase: Completed\n\nNamespaces:\n Included: backupdemo\n Excluded: <none>\n\nResources:\n Included: *\n Excluded: <none>\n Cluster-scoped: auto\n\nLabel selector: <none>\n\nStorage Location: default\n\nVelero-Native Snapshot PVs: auto\n\nTTL: 720h0m0s\n\nCSISnapshotTimeout: 10m0s\nItemOperationTimeout: 0s\n\nHooks: <none>\n\nBackup Format Version: 1.1.0\n\nStarted: 2023-05-04 15:16:31 -0400 EDT\nCompleted: 2023-05-04 15:16:33 -0400 EDT\n\nExpiration: 2023-06-03 15:16:31 -0400 EDT\n\nTotal items to be backed up: 9\nItems backed up: 9\nVelero-Native Snapshots: <none included>\n
kubectl delete namespace backupdemo\n
velero restore create --from-backup backup1\n
kubectl get all -n backupdemo\n\n# Output should look similar to below\nNAME READY STATUS RESTARTS AGE\npod/nginx 1/1 Running 0 21s\n
"},{"location":"addons/vertical-pod-autoscaler/","title":"Vertical Pod Autoscaler","text":"VPA Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help \"right size\" your applications. When configured, it will automatically request the necessary reservations based on usage and thus allow proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in initial container configuration.
NOTE: The Metrics Server add-on is a dependency for this add-on.
"},{"location":"addons/vertical-pod-autoscaler/#usage","title":"Usage","text":"This step deploys the Vertical Pod Autoscaler with default Helm Chart config
enable_vpa = true\nenable_metrics_server = true\n
You can also customize the Helm chart that deploys vpa
via the following configuration:
enable_vpa = true\nenable_metrics_server = true\nvpa = {\nname = \"vpa\"\nchart_version = \"1.7.5\"\nrepository = \"https://charts.fairwinds.com/stable\"\nnamespace = \"vpa\"\nvalues = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n
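With the controller running, individual workloads opt in through a VerticalPodAutoscaler object. The following is a minimal sketch using the Terraform kubernetes_manifest resource; the target Deployment my-app and the Auto update mode are illustrative assumptions, and the VPA CRDs must already exist at plan time.
resource \"kubernetes_manifest\" \"vpa_example\" {\nmanifest = {\napiVersion = \"autoscaling.k8s.io/v1\"\nkind = \"VerticalPodAutoscaler\"\nmetadata = {\nname = \"my-app-vpa\"\nnamespace = \"default\"\n}\nspec = {\ntargetRef = {\napiVersion = \"apps/v1\"\nkind = \"Deployment\"\nname = \"my-app\"\n}\nupdatePolicy = {\nupdateMode = \"Auto\"\n}\n}\n}\n}\n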
"}]}
\ No newline at end of file
diff --git a/main/sitemap.xml b/main/sitemap.xml
index 16657dee..3df86148 100644
--- a/main/sitemap.xml
+++ b/main/sitemap.xml
@@ -2,157 +2,157 @@