diff --git a/README.md b/README.md index b0767eb..16ee258 100644 --- a/README.md +++ b/README.md @@ -95,10 +95,6 @@ Please refer to each module documentation and the [examples](examples/) folder f -## Contributing - -Before contributing, please read first the [Contributing Guidelines](docs/CONTRIBUTING.md). - ### Reporting Issues In case you experience any problem with the module, please [open a new issue](https://github.com/sighupio/fury-eks-installer/issues/new). diff --git a/docs/upgrades/v1.11-to-v2.0.0.md b/docs/upgrades/v1.11-to-v2.0.0.md deleted file mode 100644 index d80f740..0000000 --- a/docs/upgrades/v1.11-to-v2.0.0.md +++ /dev/null @@ -1,131 +0,0 @@ -# Upgrade from v1.11.x to v2.0.0 - -In version 2.0.0 of `fury-eks-installer`, we split the vpc-and-vpn module in two, to allow the creation of a VPC and VPN separately, as the latter became optional with the introduction of public EKS clusters. -We also added the possibility to configure the ability to access the Kubernetes API endpoint privately, publicly or both. - -## Migrate from the old vpc-and-vpn module to the new vpc and vpn modules - -Ensure you have terraform 1.3 or greater installed. - -Enter the directory of the terraform module that is using the vpc-and-vpn module - -```sh -cd /path/to/module -``` - -If you have the terraform state stored locally, make a backup of it. - -If you have your state saved remotely(eg: on S3), you should skip this step. - -```sh -cp /path/to/terraform.tfstate /path/to/terraform.tfstate.bak -``` - -With that out of the way, you should now rewrite your terraform configuration to use the new vpc and vpn modules, and get rid of the old vpc-and-vpn module. - -Also remember to asjust the variables for each module, as the new modules have added and changed some. - -Once you have done that, you can run `terraform init` to download the new modules, and `terraform plan` to see what changes will be applied, eg: - -```sh -terraform init -upgrade -terraform plan -input=false -refresh=true -lock=false -var-file=your.tfvars -``` - -Now it's time to move the state objects to match the new modules. 
-Assuming the old module was named "vpc-and-vpn", and the new modules are named "vpc" and "vpn", you can run the following commands to fix the state: - -```sh -terraform state mv module.vpc-and-vpn.module.vpc.aws_eip.nat[0] module.vpc[0].module.vpc.aws_eip.nat[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_internet_gateway.this[0] module.vpc[0].module.vpc.aws_internet_gateway.this[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_nat_gateway.this[0] module.vpc[0].module.vpc.aws_nat_gateway.this[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.private[0] module.vpc[0].module.vpc.aws_route_table_association.private[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.private[1] module.vpc[0].module.vpc.aws_route_table_association.private[1] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.private[2] module.vpc[0].module.vpc.aws_route_table_association.private[2] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.public[0] module.vpc[0].module.vpc.aws_route_table_association.public[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.public[1] module.vpc[0].module.vpc.aws_route_table_association.public[1] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table_association.public[2] module.vpc[0].module.vpc.aws_route_table_association.public[2] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table.private[0] module.vpc[0].module.vpc.aws_route_table.private[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route_table.public[0] module.vpc[0].module.vpc.aws_route_table.public[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route.private_nat_gateway[0] module.vpc[0].module.vpc.aws_route.private_nat_gateway[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_route.public_internet_gateway[0] module.vpc[0].module.vpc.aws_route.public_internet_gateway[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.private[0] module.vpc[0].module.vpc.aws_subnet.private[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.private[1] module.vpc[0].module.vpc.aws_subnet.private[1] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.private[2] module.vpc[0].module.vpc.aws_subnet.private[2] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.public[0] module.vpc[0].module.vpc.aws_subnet.public[0] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.public[1] module.vpc[0].module.vpc.aws_subnet.public[1] -terraform state mv module.vpc-and-vpn.module.vpc.aws_subnet.public[2] module.vpc[0].module.vpc.aws_subnet.public[2] -terraform state mv module.vpc-and-vpn.module.vpc.aws_vpc.this[0] module.vpc[0].module.vpc.aws_vpc.this[0] - -terraform state mv module.vpc-and-vpn.aws_eip_association.vpn[0] module.vpn[0].aws_eip_association.vpn[0] -terraform state mv module.vpc-and-vpn.aws_eip.vpn[0] module.vpn[0].aws_eip.vpn[0] -terraform state mv module.vpc-and-vpn.aws_iam_access_key.furyagent module.vpn[0].aws_iam_access_key.furyagent -terraform state mv module.vpc-and-vpn.aws_iam_policy_attachment.furyagent module.vpn[0].aws_iam_policy_attachment.furyagent -terraform state mv module.vpc-and-vpn.aws_iam_policy.furyagent module.vpn[0].aws_iam_policy.furyagent -terraform state mv module.vpc-and-vpn.aws_iam_user.furyagent module.vpn[0].aws_iam_user.furyagent -terraform state mv module.vpc-and-vpn.aws_instance.vpn[0] module.vpn[0].aws_instance.vpn[0] -terraform state mv 
module.vpc-and-vpn.aws_s3_bucket.furyagent module.vpn[0].aws_s3_bucket.furyagent -terraform state mv module.vpc-and-vpn.aws_security_group_rule.vpn module.vpn[0].aws_security_group_rule.vpn -terraform state mv module.vpc-and-vpn.aws_security_group_rule.vpn_egress module.vpn[0].aws_security_group_rule.vpn_egress -terraform state mv module.vpc-and-vpn.aws_security_group_rule.vpn_ssh module.vpn[0].aws_security_group_rule.vpn_ssh -terraform state mv module.vpc-and-vpn.aws_security_group.vpn module.vpn[0].aws_security_group.vpn -terraform state mv module.vpc-and-vpn.local_file.furyagent module.vpn[0].local_file.furyagent -terraform state mv module.vpc-and-vpn.local_file.sshkeys module.vpn[0].local_file.sshkeys -terraform state mv module.vpc-and-vpn.null_resource.init module.vpn[0].null_resource.init -terraform state mv module.vpc-and-vpn.null_resource.ssh_users module.vpn[0].null_resource.ssh_users -``` - -Verify the plan once again, making sure the remaining changes you are going to apply shortly are safe. - -```sh -terraform plan -input=false -refresh=true -lock=false -var-file=your.tfvars -``` - -The changes you can expect should look like this: - -```sh - # module.vpn[0].aws_instance.vpn[0] will be updated in-place - ~ resource "aws_instance" "vpn" { - id = "i-09e4553cae2b9541d" - ~ tags = { - + "Name" = "public-4-vpn-0" - } - ~ tags_all = { - + "Name" = "public-4-vpn-0" - } - } - - # module.vpn[0].local_file.furyagent will be created - + resource "local_file" "furyagent" { - + content = (sensitive) - + directory_permission = "0777" - + file_permission = "0777" - + filename = "./secrets/furyagent.yml" - + id = (known after apply) - } - - # module.vpn[0].local_file.sshkeys will be created - + resource "local_file" "sshkeys" { - + content = <<-EOT - users: - - name: your-user - user_id: your-user - EOT - + directory_permission = "0777" - + file_permission = "0777" - + filename = "./ssh-users.yml" - + id = (known after apply) -``` - -If everything looks correct, you can proceed to apply the changes: - -```sh -terraform apply -input=false -refresh=true -lock=false -var-file=your.tfvars -``` - -If everything goes well and you have the terraform state stored locally, you can now clean up the state backups: -do that only when you are sure that the state has been migrated successfully. - -```sh -rm terraform.tfstate.*.backup -``` diff --git a/docs/upgrades/v1.11.x-to-v2.0.x.md b/docs/upgrades/v1.11.x-to-v2.0.x.md new file mode 100644 index 0000000..abcb77e --- /dev/null +++ b/docs/upgrades/v1.11.x-to-v2.0.x.md @@ -0,0 +1,577 @@ +# Upgrade from v1.11.x to v2.0.x + +In version 2.0 of `fury-eks-installer`, we split the vpc-and-vpn module in two, to allow the creation of a VPC and VPN separately, as the latter became optional with the introduction of public EKS clusters. +We also added the possibility to configure the ability to access the Kubernetes API endpoint privately, publicly or both. +Last but not least, we upgraded Terraform from 0.15.x to 1.3+, also bumping some of the providers and modules versions. + +> ⛔️ **IMPORTANT** +> we strongly recommend reading the whole guide before starting the upgrade process to identify possible blockers. 
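+
+Before you start, it is also worth checking that the Terraform version you are running satisfies the new constraint and taking an explicit backup of the current state. A minimal pre-flight sketch (the backup file name is arbitrary, and it assumes your shell can reach the state backend):
+
+```shell
+terraform version
+terraform state pull > pre-v2-upgrade.tfstate.backup
+```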
+ +## Migrate to v2 + +When starting the migration you should have something like the following code in your codebase: + +```hcl +terraform { + required_version = "~> 0.15.4" + required_providers { + aws = "~> 3.56.0" + external = "~> 2.0.0" + local = "~> 2.0.0" + null = "~> 3.0.0" + } +} + +module "vpc_and_vpn" { + source = "../modules/vpc-and-vpn" + + name = "fury-eks" # make sure to use the same name value as cluster name + + network_cidr = "10.195.0.0/16" + public_subnetwork_cidrs = ["10.195.1.0/24", "10.195.2.0/24", "10.195.3.0/24"] + private_subnetwork_cidrs = ["10.195.101.0/24", "10.195.102.0/24", "10.195.103.0/24"] + tags = {} + vpn_dhparams_bits = 2048 + vpn_instance_disk_size = 50 + vpn_instance_type = "t3.micro" + vpn_instances = 2 + vpn_operator_cidrs = ["0.0.0.0/0"] + vpn_operator_name = "sighup" + vpn_port = 1194 + vpn_subnetwork_cidr = "10.195.201.0/24" + vpn_ssh_users = ["octocat"] +} + +provider "aws" { + region = "eu-west-1" +} + +variable "ssh_public_key" { + type = string + description = "SSH public key to be used to access the cluster nodes" +} + +module "fury" { + source = "../modules/eks" + + cluster_name = "fury-eks" # make sure to use the same name you used in the VPC and VPN module + cluster_version = "1.24" + + network = module.vpc_and_vpn.vpc_id + subnetworks = module.vpc_and_vpn.private_subnets + + ssh_public_key = var.ssh_public_key + dmz_cidr_range = "10.195.0.0/16" + + node_pools = [ + { + name : "m5-node-pool" + version : null # To use same value as cluster_version + min_size : 1 + max_size : 2 + instance_type : "m5.large" + spot_instance: true + volume_size : 100 + subnetworks : null + eks_target_group_arns : null + additional_firewall_rules : [ + { + name : "Debug 1" + direction : "ingress" + cidr_block : "0.0.0.0/0" + protocol : "TCP" + ports : "80-80" + tags : { + "hello" : "tag", + "cluster-tags" : "my-value-OVERRIDE-1" + } + } + ] + labels : { + "node.kubernetes.io/role" : "app" + "sighup.io/fury-release" : "v1.24.0" + } + taints : [] + tags : { + "node-tags" : "exists" + } + # max_pods : null # To use default EKS setting set it to null or do not set it + }, + { + name : "m5-node-pool-spot" + version : null # To use same value as cluster_version + min_size : 1 + max_size : 2 + instance_type : "m5.large" + spot_instance : true # optionally create spot instances + # os : "ami-0caf35bc73450c396" # optionally define a custom AMI + volume_size : 100 + subnetworks : null + eks_target_group_arns : null + additional_firewall_rules : [ + { + name : "Debug 2" + direction : "ingress" + cidr_block : "0.0.0.0/0" + protocol : "TCP" + ports : "80-80" + tags : { + "hello" : "tag", + "cluster-tags" : "my-value-OVERRIDE-2" + } + } + ] + labels : { + "node.kubernetes.io/role" : "app" + "sighup.io/fury-release" : "v1.24.0" + } + taints : [] + tags : { + "node-tags" : "exists" + } + # max_pods : null # To use default EKS setting set it to null or do not set it + }, + ] + + tags = { + Environment: "kfd-development" + issue: "https://github.com/sighupio/product-management/issues/195" + description: "testing fury-eks-installer from v1 to v2" + } + + eks_map_users = [] + eks_map_roles = [] + eks_map_accounts = [] +} +``` + +The first step consists in refactoring it to look something like the code below. +Note a few things: the updated versions and constraints, the `vpc-and-vpn` module that has been split in two, allowing for separate handling of vpc and vpn. On top of that, we changed a few variable names and types, and we cleaned up a bit the modules interfaces. 
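+
+For quick reference, the renames applied in the example below (they are also called out in its inline comments) are summarized here; a handful of new inputs, such as `names_of_kubernetes_cluster_integrated_with_subnets` and `vpn_bucket_name_prefix`, are introduced as well:
+
+```hcl
+# vpc module:   network_cidr   -> cidr
+# eks module:   network        -> vpc_id
+#               subnetworks    -> subnets
+#               dmz_cidr_range -> ssh_to_nodes_allowed_cidr_blocks
+# node pools:   additional_firewall_rules changes from a list to an object holding a
+#               cidr_blocks list, where direction -> type, cidr_block -> cidr_blocks,
+#               and ports -> from_port / to_port
+```
+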
+ +```hcl +terraform { + required_version = "~> 1.4" + required_providers { + local = "~> 2.4.0" + null = "~> 3.2.1" + aws = "~> 3.76.1" + external = "~> 2.3.1" + kubernetes = "~> 1.13.4" + } +} + +module "vpc" { + source = "../modules/vpc" + + name = "fury-eks" + + # renamed from `network_cidr` + cidr = "10.195.0.0/16" + # New variable to support multiple cluster within the same subnets + names_of_kubernetes_cluster_integrated_with_subnets = [ + "fury-eks" + ] + + single_nat_gateway = true + one_nat_gateway_per_az = false + + public_subnetwork_cidrs = ["10.195.1.0/24", "10.195.2.0/24", "10.195.3.0/24"] + private_subnetwork_cidrs = ["10.195.101.0/24", "10.195.102.0/24", "10.195.103.0/24"] + tags = {} +} + + +module "vpn" { + source = "../modules/vpn" + + name = "fury-eks" + + # Added variable to set `vpc` ID + vpc_id = module.vpc.vpc_id + public_subnets = module.vpc.public_subnets + + vpn_bucket_name_prefix = "fury-eks-bootstrap-bucket-" + + tags = {} + vpn_dhparams_bits = 2048 + vpn_instance_disk_size = 50 + vpn_instance_type = "t3.micro" + vpn_instances = 2 + vpn_operator_cidrs = ["0.0.0.0/0"] + vpn_operator_name = "sighup" + vpn_port = 1194 + vpn_subnetwork_cidr = "10.195.201.0/24" + vpn_ssh_users = ["octocat"] +} + +# Add the kubernetes provider +provider "kubernetes" { + host = data.aws_eks_cluster.fury.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.fury.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.fury.token + load_config_file = false +} + +# Add the EKS data fetcher +data "aws_eks_cluster" "fury" { + name = module.fury.cluster_id +} + +# Add the EKS auth data fetcher +data "aws_eks_cluster_auth" "fury" { + name = module.fury.cluster_id +} + +module "fury" { + source = "../modules/eks" + + cluster_name = "fury-eks" + cluster_version = "1.24" + + # Renamed from `network` + vpc_id = module.vpc.vpc_id + # renamed from `subnetworks` + subnets = module.vpc.private_subnets + + ssh_public_key = var.ssh_public_key + # renamed from `dmz_cidr_range` + ssh_to_nodes_allowed_cidr_blocks = [module.vpc.vpc_cidr_block] + cluster_endpoint_private_access_cidrs = [module.vpc.vpc_cidr_block] + + node_pools = [ + { + name : "m5-node-pool" + version : null # To use same value as cluster_version + min_size : 1 + max_size : 2 + instance_type : "m5.large" + spot_instance : true + volume_size : 100 + subnetworks : null + eks_target_group_arns : null + # From list to object + additional_firewall_rules : { + cidr_blocks = [ + { + name : "Debug 1" + #Renamed from `direction` + type : "ingress" + # moved from `cidr_block` to `cidr_blocks` + cidr_blocks : ["0.0.0.0/0"] + protocol : "TCP" + # moved from `ports` to `from_port` and `to_port` + from_port : 80 + to_port : 80 + tags : { + "hello" : "tag", + "cluster-tags" : "my-value-OVERRIDE-1" + } + } + ] + } + labels : { + "node.kubernetes.io/role" : "app" + "sighup.io/fury-release" : "v1.24.0" + } + taints : [] + tags : { + "node-tags" : "exists" + } + # max_pods : null # To use default EKS setting set it to null or do not set it + }, + { + name : "m5-node-pool-spot" + version : null # To use same value as cluster_version + min_size : 1 + max_size : 2 + instance_type : "m5.large" + spot_instance : true # optionally create spot instances + # os : "ami-0caf35bc73450c396" # optionally define a custom AMI + volume_size : 100 + subnetworks : null + eks_target_group_arns : null + additional_firewall_rules : { + cidr_blocks = [ + { + name : "Debug 2" + type : "ingress" + cidr_blocks : ["0.0.0.0/0"] + protocol : "TCP" + from_port 
: 80 + to_port: 80 + tags : { + "hello" : "tag", + "cluster-tags" : "my-value-OVERRIDE-2" + } + } + ] + } + labels : { + "node.kubernetes.io/role" : "app" + "sighup.io/fury-release" : "v1.24.0" + } + taints : [] + tags : { + "node-tags" : "exists" + } + # max_pods : null # To use default EKS setting set it to null or do not set it + }, + ] + + tags = { + Environment : "kfd-development" + } + + eks_map_users = [] + eks_map_roles = [] + eks_map_accounts = [] +} +``` + +Once you verified everything is refactored correctly, you can start operating on the infrastructure. +You'll need to upgrade the dependencies and move some resources in your terraform state to match the new structure of the modules. + +> ⛔️ **IMPORTANT** +> Make sure you have a backup of your terraform state before proceeding with the following commands: terraform will create them on its own, so make sure to keep them safe, or make an extra copy on your own and store it in a safe location. + +The following two lists of commands will guide you through the whole process: + +# ON VPC-and-VPN root module +```shell +terraform init -upgrade + +export VPC_AND_VPN_MODULE_NAME=vpc_and_vpn +export VPC_MODULE_NAME=vpc +export VPN_MODULE_NAME=vpn +#export VPN_INSTANCES=2 +#export LENGTH_PRIVATE_SUBNETS=3 +#export LENGTH_PUBLIC_SUBNETS=3 + +echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_vpc.this[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_vpc.this[0]'" | sh + +for COUNT in {0..$((${VPN_INSTANCES:-2}-1))}; do + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_eip_association.vpn[$COUNT]' 'module.${VPN_MODULE_NAME}.aws_eip_association.vpn[$COUNT]'" | sh + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_instance.vpn[$COUNT]' 'module.${VPN_MODULE_NAME}.aws_instance.vpn[$COUNT]'" | sh + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_eip.vpn[$COUNT]' 'module.${VPN_MODULE_NAME}.aws_eip.vpn[$COUNT]'" | sh +done + +echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route_table.private[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route_table.private[0]'" | sh +for COUNT in {0..$((${LENGTH_PRIVATE_SUBNETS:-3}-1))}; do + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route_table_association.private[$COUNT]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route_table_association.private[$COUNT]'" | sh + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_subnet.private[$COUNT]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_subnet.private[$COUNT]'" | sh +done + +echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route_table.public[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route_table.public[0]'" | sh +for COUNT in {0..$((${LENGTH_PUBLIC_SUBNETS:-3}-1))}; do + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_subnet.public[$COUNT]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_subnet.public[$COUNT]'" | sh + echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route_table_association.public[$COUNT]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route_table_association.public[$COUNT]'" | sh +done + +echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_eip.nat[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_eip.nat[0]'" | sh +echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_internet_gateway.this[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_internet_gateway.this[0]'" | sh +echo "terraform state mv 
'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_nat_gateway.this[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_nat_gateway.this[0]'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route.private_nat_gateway[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route.private_nat_gateway[0]'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.module.vpc.aws_route.public_internet_gateway[0]' 'module.${VPC_MODULE_NAME}.module.vpc.aws_route.public_internet_gateway[0]'" | sh
+
+
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_iam_access_key.furyagent' 'module.${VPN_MODULE_NAME}.aws_iam_access_key.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_iam_policy.furyagent' 'module.${VPN_MODULE_NAME}.aws_iam_policy.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_iam_policy_attachment.furyagent' 'module.${VPN_MODULE_NAME}.aws_iam_policy_attachment.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_iam_user.furyagent' 'module.${VPN_MODULE_NAME}.aws_iam_user.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_s3_bucket.furyagent' 'module.${VPN_MODULE_NAME}.aws_s3_bucket.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_security_group.vpn' 'module.${VPN_MODULE_NAME}.aws_security_group.vpn'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_security_group_rule.vpn' 'module.${VPN_MODULE_NAME}.aws_security_group_rule.vpn'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_security_group_rule.vpn_egress' 'module.${VPN_MODULE_NAME}.aws_security_group_rule.vpn_egress'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.aws_security_group_rule.vpn_ssh' 'module.${VPN_MODULE_NAME}.aws_security_group_rule.vpn_ssh'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.local_file.furyagent' 'module.${VPN_MODULE_NAME}.local_file.furyagent'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.local_file.sshkeys' 'module.${VPN_MODULE_NAME}.local_file.sshkeys'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.null_resource.init' 'module.${VPN_MODULE_NAME}.null_resource.init'" | sh
+echo "terraform state mv 'module.${VPC_AND_VPN_MODULE_NAME}.null_resource.ssh_users' 'module.${VPN_MODULE_NAME}.null_resource.ssh_users'" | sh
+
+terraform plan -out terraform.plan
+```
+
+# ON FURY root module
+```shell
+terraform init -upgrade
+
+export EKS_MODULE_NAME=fury
+echo "terraform state rm 'module.${EKS_MODULE_NAME}.module.cluster.kubernetes_config_map.aws_auth[0]'" | sh
+echo "terraform state mv 'module.${EKS_MODULE_NAME}.aws_security_group.nodes' 'module.${EKS_MODULE_NAME}.aws_security_group.node_pool_shared'" | sh
+echo "terraform state mv 'module.${EKS_MODULE_NAME}.aws_security_group_rule.ssh_from_dmz_to_nodes' 'module.${EKS_MODULE_NAME}.aws_security_group_rule.ssh_to_nodes'" | sh
+```
+
+One of the trickiest parts of the migration is moving the node pools from the old version, which addresses them by index (a list-type property), to the new one, which addresses them by key (an object-type property). To perform the move, use `var.node_pool.*.name` as the key.
+Check the terraform plan output to understand how each index maps to a key, as in the example below.
+
+> E.g:
+>
+> ```hcl
+> terraform plan
+>
+> ...
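+> # NOTE: in the destroy/create pairs below, the security group `description` and `name`
+> # (e.g. "m5-node-pool" vs "m5-node-pool-spot") show which numeric index corresponds to
+> # which node pool key: node_pool[0] maps to node_pool["m5-node-pool"] and
+> # node_pool[1] to node_pool["m5-node-pool-spot"].
+> #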
+> ## module.fury.aws_security_group.node_pool[0] will be destroyed +> ## (because resource does not use count) +> #- resource "aws_security_group" "node_pool" { +> # - arn = "arn:aws:ec2:eu-west-1:492816857163:security-group/sg-00a99dc5940513efc" -> null +> # - description = "Additional security group for the node pool m5-node-pool in the fury-eks cluster" -> null +> # - egress = [] -> null +> # - id = "sg-00a99dc5940513efc" -> null +> # - ingress = [ +> # - { +> # - cidr_blocks = [ +> # - "0.0.0.0/0", +> # ] +> # - description = "" +> # - from_port = 80 +> # - ipv6_cidr_blocks = [] +> # - prefix_list_ids = [] +> # - protocol = "tcp" +> # - security_groups = [] +> # - self = false +> # - to_port = 80 +> # }, +> # ] -> null +> # - name = "fury-eks-nodepool-m5-node-pool" -> null +> # - owner_id = "492816857163" -> null +> # - revoke_rules_on_delete = false -> null +> # - tags = { +> # - "Environment" = "kfd-development" +> # - "cluster-tags" = "my-value-OVERRIDE-1" +> # - "description" = "testing fury-eks-installer from v1 to v2" +> # - "hello" = "tag" +> # - "issue" = "https://github.com/sighupio/product-management/issues/195" +> # - "node-tags" = "exists" +> # } -> null +> # - tags_all = { +> # - "Environment" = "kfd-development" +> # - "cluster-tags" = "my-value-OVERRIDE-1" +> # - "description" = "testing fury-eks-installer from v1 to v2" +> # - "hello" = "tag" +> # - "issue" = "https://github.com/sighupio/product-management/issues/195" +> # - "node-tags" = "exists" +> # } -> null +> # - vpc_id = "vpc-0a5ae4547bd95e85f" -> null +> # } +> # +> ## module.fury.aws_security_group.node_pool[1] will be destroyed +> ## (because resource does not use count) +> #- resource "aws_security_group" "node_pool" { +> # - arn = "arn:aws:ec2:eu-west-1:492816857163:security-group/sg-0647ff22ae85f16e7" -> null +> # - description = "Additional security group for the node pool m5-node-pool-spot in the fury-eks cluster" -> null +> # - egress = [] -> null +> # - id = "sg-0647ff22ae85f16e7" -> null +> # - ingress = [ +> # - { +> # - cidr_blocks = [ +> # - "0.0.0.0/0", +> # ] +> # - description = "" +> # - from_port = 80 +> # - ipv6_cidr_blocks = [] +> # - prefix_list_ids = [] +> # - protocol = "tcp" +> # - security_groups = [] +> # - self = false +> # - to_port = 80 +> # }, +> # ] -> null +> # - name = "fury-eks-nodepool-m5-node-pool-spot" -> null +> # - owner_id = "492816857163" -> null +> # - revoke_rules_on_delete = false -> null +> # - tags = { +> # - "Environment" = "kfd-development" +> # - "cluster-tags" = "my-value-OVERRIDE-2" +> # - "description" = "testing fury-eks-installer from v1 to v2" +> # - "hello" = "tag" +> # - "issue" = "https://github.com/sighupio/product-management/issues/195" +> # - "node-tags" = "exists" +> # } -> null +> # - tags_all = { +> # - "Environment" = "kfd-development" +> # - "cluster-tags" = "my-value-OVERRIDE-2" +> # - "description" = "testing fury-eks-installer from v1 to v2" +> # - "hello" = "tag" +> # - "issue" = "https://github.com/sighupio/product-management/issues/195" +> # - "node-tags" = "exists" +> # } -> null +> # - vpc_id = "vpc-0a5ae4547bd95e85f" -> null +> # } +> # +> ## module.fury.aws_security_group.node_pool["m5-node-pool"] will be created +> #+ resource "aws_security_group" "node_pool" { +> # + arn = (known after apply) +> # + description = "Additional security group for the node pool m5-node-pool in the fury-eks cluster" +> # + egress = (known after apply) +> # + id = (known after apply) +> # + ingress = (known after apply) +> # + name = 
"fury-eks-nodepool-m5-node-pool" +> # + name_prefix = (known after apply) +> # + owner_id = (known after apply) +> # + revoke_rules_on_delete = false +> # + tags = { +> # + "Environment" = "kfd-development" +> # + "cluster-tags" = "my-value-OVERRIDE-1" +> # + "description" = "testing fury-eks-installer from v1 to v2" +> # + "hello" = "tag" +> # + "issue" = "https://github.com/sighupio/product-management/issues/195" +> # + "node-tags" = "exists" +> # } +> # + tags_all = { +> # + "Environment" = "kfd-development" +> # + "cluster-tags" = "my-value-OVERRIDE-1" +> # + "description" = "testing fury-eks-installer from v1 to v2" +> # + "hello" = "tag" +> # + "issue" = "https://github.com/sighupio/product-management/issues/195" +> # + "node-tags" = "exists" +> # } +> # + vpc_id = "vpc-0a5ae4547bd95e85f" +> # } +> # +> ## module.fury.aws_security_group.node_pool["m5-node-pool-spot"] will be created +> #+ resource "aws_security_group" "node_pool" { +> # + arn = (known after apply) +> # + description = "Additional security group for the node pool m5-node-pool-spot in the fury-eks cluster" +> # + egress = (known after apply) +> # + id = (known after apply) +> # + ingress = (known after apply) +> # + name = "fury-eks-nodepool-m5-node-pool-spot" +> # + name_prefix = (known after apply) +> # + owner_id = (known after apply) +> # + revoke_rules_on_delete = false +> # + tags = { +> # + "Environment" = "kfd-development" +> # + "cluster-tags" = "my-value-OVERRIDE-2" +> # + "description" = "testing fury-eks-installer from v1 to v2" +> # + "hello" = "tag" +> # + "issue" = "https://github.com/sighupio/product-management/issues/195" +> # + "node-tags" = "exists" +> # } +> # + tags_all = { +> # + "Environment" = "kfd-development" +> # + "cluster-tags" = "my-value-OVERRIDE-2" +> # + "description" = "testing fury-eks-installer from v1 to v2" +> # + "hello" = "tag" +> # + "issue" = "https://github.com/sighupio/product-management/issues/195" +> # + "node-tags" = "exists" +> # } +> # + vpc_id = "vpc-0a5ae4547bd95e85f" +> # } +> # ... +> +> ``` + +Then, you should run a command per node pool, resembling the following: + +```shell +echo "terraform state mv 'module.${EKS_MODULE_NAME}.aws_security_group.node_pool[0]' 'module.${EKS_MODULE_NAME}.aws_security_group.node_pool[\"m5-node-pool\"]'" | sh +echo "terraform state mv 'module.${EKS_MODULE_NAME}.aws_security_group.node_pool[1]' 'module.${EKS_MODULE_NAME}.aws_security_group.node_pool[\"m5-node-pool-spot\"]'" | sh +``` + +The last thing to do is importing the kubernetes config map containing the aws authentication information: + +```shell +echo terraform import "module.${EKS_MODULE_NAME}.module.cluster.kubernetes_config_map.aws_auth[0]" 'kube-system/aws-auth' | sh +terraform apply +``` diff --git a/modules/eks/ec2.tf b/modules/eks/ec2.tf index 9198214..f3ddd60 100644 --- a/modules/eks/ec2.tf +++ b/modules/eks/ec2.tf @@ -166,6 +166,6 @@ resource "aws_security_group_rule" "ssh_to_nodes" { from_port = 22 to_port = 22 protocol = "tcp" - cidr_blocks = var.ssh_to_nodes_allowed_cidr_blocks != null ? 
var.ssh_to_nodes_allowed_cidr_blocks : var.cluster_endpoint_private_access_cidrs + cidr_blocks = coalescelist(var.ssh_to_nodes_allowed_cidr_blocks , var.cluster_endpoint_private_access_cidrs) security_group_id = aws_security_group.node_pool_shared.id } diff --git a/modules/vpc/README.md b/modules/vpc/README.md index 398f3f7..bd6b471 100644 --- a/modules/vpc/README.md +++ b/modules/vpc/README.md @@ -8,11 +8,11 @@ | Name | Version | |------|---------| -| terraform | >= 1.3 | +| terraform | ~> 1.3 | | aws | ~> 3.76 | -| external | ~> 2.3 | -| local | ~> 2.4 | -| null | ~> 3.2 | +| external | ~> 2.3 | +| local | ~> 2.4 | +| null | ~> 3.2 | ## Providers diff --git a/modules/vpc/main.tf b/modules/vpc/main.tf index 52bc57f..1bb0f07 100644 --- a/modules/vpc/main.tf +++ b/modules/vpc/main.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = "~> 1.3" required_providers { local = "~> 2.4" null = "~> 3.2" diff --git a/modules/vpn/README.md b/modules/vpn/README.md index c6f7881..1269fd8 100644 --- a/modules/vpn/README.md +++ b/modules/vpn/README.md @@ -8,11 +8,11 @@ | Name | Version | |------|---------| -| terraform | >= 1.3 | +| terraform | ~> 1.3 | | aws | ~> 3.76 | -| external | ~> 2.3 | -| local | ~> 2.4 | -| null | ~> 3.2 | +| external | ~> 2.3 | +| local | ~> 2.4 | +| null | ~> 3.2 | ## Providers diff --git a/modules/vpn/main.tf b/modules/vpn/main.tf index e327758..88d59b0 100644 --- a/modules/vpn/main.tf +++ b/modules/vpn/main.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = "~> 1.3" required_providers { local = "~> 2.4" null = "~> 3.2" @@ -75,7 +75,7 @@ resource "aws_eip_association" "vpn" { // BUCKET AND IAM resource "aws_s3_bucket" "furyagent" { - bucket_prefix = "${var.name}-vpn-bucket-" + bucket_prefix = coalesce(var.vpn_bucket_name_prefix, "${var.name}-vpn-bucket-") acl = "private" force_destroy = true diff --git a/modules/vpn/variables.tf b/modules/vpn/variables.tf index eb383bf..2a72cb4 100644 --- a/modules/vpn/variables.tf +++ b/modules/vpn/variables.tf @@ -81,3 +81,9 @@ variable "vpn_routes" { })) default = null } + +variable "vpn_bucket_name_prefix" { + type = string + description = "Bucket name prefix for VPN configuration files" + default = "" +}