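# Terraform and provider requirements for this configuration.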
terraform {
  required_version = "1.2.9"
  required_providers {
    aws = {
      source = "hashicorp/aws"
    }
    tls = {
      source = "hashicorp/tls"
    }
    helm = {
      source = "hashicorp/helm"
    }
    rancher2 = {
      source = "rancher/rancher2"
    }
  }
}
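
# AWS provider; the region comes from locals defined elsewhere in this configuration.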
provider "aws" {
  region = local.region
}
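
# Shared AWS resources, notably the SSH key pair whose name is reused by all hosts below.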
module "aws_shared" {
  source              = "./aws_shared"
  project_name        = local.project_name
  ssh_public_key_path = local.ssh_public_key_path
}
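
# Network layout: public and private subnets plus matching security groups (outputs referenced below).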
module "aws_network" {
  source            = "./aws_network"
  region            = local.region
  availability_zone = local.availability_zone
  project_name      = local.project_name
}
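
# Bastion host in the public subnet, used as an SSH jump host to reach the cluster nodes in the private subnet.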
module "bastion" {
  depends_on            = [module.aws_network]
  source                = "./aws_host"
  ami                   = local.bastion_ami
  availability_zone     = local.availability_zone
  project_name          = local.project_name
  name                  = "bastion"
  ssh_key_name          = module.aws_shared.key_name
  ssh_private_key_path  = local.ssh_private_key_path
  subnet_id             = module.aws_network.public_subnet_id
  vpc_security_group_id = module.aws_network.public_security_group_id
}
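
# Upstream RKE2 cluster that hosts Rancher. It is reached through SSH tunnels via the bastion:
# local port 6443 for the Kubernetes API and local port 3000 forwarded to 443 (the Rancher HTTPS endpoint).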
module "upstream_cluster" {
  source                        = "./aws_rke2"
  ami                           = local.upstream_ami
  instance_type                 = local.upstream_instance_type
  availability_zone             = local.availability_zone
  project_name                  = local.project_name
  name                          = "upstream"
  server_count                  = local.upstream_server_count
  agent_count                   = local.upstream_agent_count
  ssh_key_name                  = module.aws_shared.key_name
  ssh_private_key_path          = local.ssh_private_key_path
  ssh_bastion_host              = module.bastion.public_name
  subnet_id                     = module.aws_network.private_subnet_id
  vpc_security_group_id         = module.aws_network.private_security_group_id
  k8s_api_ssh_tunnel_local_port = 6443
  additional_ssh_tunnels        = [[3000, 443]]
  rke2_version                  = local.upstream_rke2_version
  max_pods                      = local.upstream_max_pods
  node_cidr_mask_size           = local.upstream_node_cidr_mask_size
  sans                          = [local.upstream_san]
}
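
# Helm provider pointed at the upstream cluster's kubeconfig, expected at ./config/upstream.yaml.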
provider "helm" {
  kubernetes {
    config_path = "./config/upstream.yaml"
  }
}
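
# Rancher installation on the upstream cluster; only created when at least one upstream server node exists.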
module "rancher" {
  depends_on       = [module.upstream_cluster]
  count            = local.upstream_server_count > 0 ? 1 : 0
  source           = "./rancher"
  public_name      = local.upstream_san
  private_name     = module.upstream_cluster.first_server_private_name
  api_token_string = module.secret.values.api_token_string
  chart            = local.rancher_chart
}
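
# Downstream RKE2 cluster(s) to be managed by Rancher. Raise count to create more; each instance gets its
# own local Kubernetes API tunnel port (6444 + index) so tunnels do not collide.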
module "downstream_cluster" {
  source                        = "./aws_rke2"
  count                         = 1
  ami                           = local.downstream_ami
  instance_type                 = local.downstream_instance_type
  availability_zone             = local.availability_zone
  project_name                  = local.project_name
  name                          = "downstream-${count.index}"
  server_count                  = local.downstream_server_count
  agent_count                   = local.downstream_agent_count
  ssh_key_name                  = module.aws_shared.key_name
  ssh_private_key_path          = local.ssh_private_key_path
  ssh_bastion_host              = module.bastion.public_name
  subnet_id                     = module.aws_network.private_subnet_id
  vpc_security_group_id         = module.aws_network.private_security_group_id
  k8s_api_ssh_tunnel_local_port = 6444 + count.index
  rke2_version                  = local.downstream_rke2_version
  max_pods                      = local.downstream_max_pods
  node_cidr_mask_size           = local.downstream_node_cidr_mask_size
  sans                          = [local.downstream_san]
}
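
# The locals referenced above (region, project_name, AMIs, instance types, counts, versions, SANs, key paths,
# chart) are not defined in this file and typically live in a sibling .tf file. The commented-out block below is
# a minimal, illustrative sketch only: the names are taken from the references above, but every value is an
# assumption and would need to be replaced with real settings for a given environment.
#
# locals {
#   project_name                   = "scalability-test"      # illustrative
#   region                         = "us-east-1"             # illustrative
#   availability_zone              = "us-east-1a"            # illustrative
#   ssh_public_key_path            = "~/.ssh/id_rsa.pub"     # illustrative
#   ssh_private_key_path           = "~/.ssh/id_rsa"         # illustrative
#   bastion_ami                    = "ami-0123456789abcdef0" # placeholder AMI id
#   upstream_ami                   = "ami-0123456789abcdef0" # placeholder AMI id
#   upstream_instance_type         = "t3.xlarge"             # illustrative
#   upstream_server_count          = 3                       # illustrative
#   upstream_agent_count           = 0                       # illustrative
#   upstream_rke2_version          = "v1.24.4+rke2r1"        # illustrative
#   upstream_max_pods              = 110                     # illustrative
#   upstream_node_cidr_mask_size   = 24                      # illustrative
#   upstream_san                   = "upstream.local"        # illustrative
#   rancher_chart                  = "https://releases.rancher.com/server-charts/latest" # illustrative
#   downstream_ami                 = "ami-0123456789abcdef0" # placeholder AMI id
#   downstream_instance_type       = "t3.large"              # illustrative
#   downstream_server_count        = 1                       # illustrative
#   downstream_agent_count         = 0                       # illustrative
#   downstream_rke2_version        = "v1.24.4+rke2r1"        # illustrative
#   downstream_max_pods            = 110                     # illustrative
#   downstream_node_cidr_mask_size = 24                      # illustrative
#   downstream_san                 = "downstream.local"      # illustrative
# }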