-
Notifications
You must be signed in to change notification settings - Fork 1
/
main.tf
311 lines (243 loc) · 8.38 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
# Project API enablement, managed via the project-factory services submodule:
# https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/project_services

# Allows other resources to refer to things like the authorization token for
# the configured Google account.
data "google_client_config" "default" {}

module "project-services" {
  source  = "terraform-google-modules/project-factory/google//modules/project_services"
  version = "10.1.1"

  project_id = var.project

  # APIs required by the GKE cluster and Kubeflow workloads below.
  activate_apis = [
    "compute.googleapis.com",
    "container.googleapis.com",
    "iam.googleapis.com",
    "servicemanagement.googleapis.com",
    "cloudresourcemanager.googleapis.com",
    "ml.googleapis.com",
    "meshconfig.googleapis.com",
  ]

  disable_services_on_destroy = var.disable_googleapi_services_on_destroy
}
# The GKE cluster. The node pools are managed as separate resources below.
resource "google_container_cluster" "kubeflow_cluster" {
  depends_on = [
    google_service_account.kubeflow_admin,
    google_service_account.kubeflow_user,
    google_service_account.kubeflow_vm,
  ]

  provider = google-beta

  name     = var.cluster_name
  location = var.cluster_zone
  project  = var.project

  # TPU requires a separate ip range (https://cloud.google.com/tpu/docs/kubernetes-engine-setup)
  # Disable it for now until we figure out how it works with xpn network
  enable_tpu = false

  min_master_version = var.min_master_version
  network            = var.network
  subnetwork         = var.subnetwork

  # https://www.terraform.io/docs/providers/google/r/container_cluster.html
  # recommends managing the node pool as a separate resource, which we do
  # below. GKE still requires a default pool at creation time; it is removed
  # right after the cluster comes up.
  remove_default_node_pool = true
  initial_node_count       = 1

  ip_allocation_policy {
    cluster_secondary_range_name  = var.cluster_secondary_range_name
    services_secondary_range_name = var.services_secondary_range_name
  }

  resource_labels = {
    "application" = "kubeflow"
    "env"         = var.env_label
  }

  addons_config {
    horizontal_pod_autoscaling {
      disabled = false
    }
    http_load_balancing {
      disabled = false
    }
    network_policy_config {
      # The addon must be enabled whenever network-policy enforcement is on.
      disabled = !var.network_policy_enabled
    }
  }

  enable_legacy_abac = false

  master_auth {
    client_certificate_config {
      issue_client_certificate = var.issue_client_certificate
    }
    # Setting an empty username disables basic auth
    # From https://cloud.google.com/sdk/gcloud/reference/container/clusters/create:
    # --no-enable-basic-auth is an alias for --username=""
    username = ""
    # password is required if username is present
    password = ""
  }

  network_policy {
    enabled = var.network_policy_enabled
    # Bug fix: the previous conditional returned the literal string "null",
    # which is not a valid provider name for network_policy. When enforcement
    # is disabled, use PROVIDER_UNSPECIFIED (the API default).
    provider = var.network_policy_enabled ? "CALICO" : "PROVIDER_UNSPECIFIED"
  }

  logging_service    = "logging.googleapis.com/kubernetes"
  monitoring_service = "monitoring.googleapis.com/kubernetes"

  timeouts {
    create = var.timeout
    update = var.timeout
    delete = var.timeout
  }

  # node auto-provisioning, they screwed up the name of fields here
  # https://github.com/terraform-providers/terraform-provider-google/issues/3303#issuecomment-477251119
  cluster_autoscaling {
    enabled = false
  }
}
# Primary (CPU) node pool for the Kubeflow cluster.
resource "google_container_node_pool" "main_pool" {
  # max_pods_per_node is in google-beta as of 2019-07-26
  provider = google-beta

  project  = var.project
  location = var.cluster_zone
  cluster  = google_container_cluster.kubeflow_cluster.name

  name               = var.main_node_pool_name
  version            = var.node_version
  initial_node_count = var.initial_node_count
  max_pods_per_node  = var.max_pods_per_node

  # Let GKE repair unhealthy nodes and roll node upgrades per the variables.
  management {
    auto_repair  = var.auto_repair
    auto_upgrade = var.auto_upgrade
  }

  autoscaling {
    min_node_count = var.main_node_pool_min_nodes
    max_node_count = var.main_node_pool_max_nodes
  }

  node_config {
    machine_type     = var.main_node_pool_machine_type
    min_cpu_platform = "Intel Broadwell"
    service_account  = google_service_account.kubeflow_vm.email

    // These scopes are needed for the GKE nodes' service account to have pull rights to GCR.
    // Default is "https://www.googleapis.com/auth/logging.write" and "https://www.googleapis.com/auth/monitoring".
    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/devstorage.read_only",
    ]
  }

  timeouts {
    create = var.timeout
    update = var.timeout
    delete = var.timeout
  }
}
# GPU node pool: starts empty and scales up on demand (0-10 nodes).
resource "google_container_node_pool" "gpu_pool" {
  # max_pods_per_node is in google-beta as of 2019-07-26
  provider = google-beta

  project  = var.project
  location = var.cluster_zone
  cluster  = google_container_cluster.kubeflow_cluster.name

  name               = var.gpu_node_pool_name
  version            = var.node_version
  initial_node_count = "0"
  max_pods_per_node  = var.max_pods_per_node

  management {
    auto_repair  = var.auto_repair
    auto_upgrade = var.auto_upgrade
  }

  autoscaling {
    min_node_count = "0"
    max_node_count = "10"
  }

  node_config {
    machine_type     = var.gpu_node_pool_machine_type
    min_cpu_platform = "Intel Broadwell"
    service_account  = google_service_account.kubeflow_vm.email

    # One accelerator of the configured type per node.
    guest_accelerator {
      type  = var.gpu_type
      count = 1
    }

    // These scopes are needed for the GKE nodes' service account to have pull rights to GCR.
    // Default is "https://www.googleapis.com/auth/logging.write" and "https://www.googleapis.com/auth/monitoring".
    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/devstorage.read_only",
    ]
  }

  timeouts {
    create = var.timeout
    update = var.timeout
    delete = var.timeout
  }
}
# High-memory node pool: starts empty and scales up on demand (0-10 nodes).
resource "google_container_node_pool" "highmem_pool" {
  # max_pods_per_node is using the default value defined in google-beta api
  provider = google-beta

  project  = var.project
  location = var.cluster_zone
  cluster  = google_container_cluster.kubeflow_cluster.name

  name               = var.highmem_node_pool_name
  version            = var.node_version
  initial_node_count = "0"
  max_pods_per_node  = var.max_pods_per_node

  management {
    auto_repair  = var.auto_repair
    auto_upgrade = var.auto_upgrade
  }

  autoscaling {
    min_node_count = "0"
    max_node_count = "10"
  }

  node_config {
    machine_type     = var.highmem_node_pool_machine_type
    min_cpu_platform = "Intel Broadwell"
    service_account  = google_service_account.kubeflow_vm.email

    // These scopes are needed for the GKE nodes' service account to have pull rights to GCR.
    // Default is "https://www.googleapis.com/auth/logging.write" and "https://www.googleapis.com/auth/monitoring".
    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/devstorage.read_only",
    ]
  }

  timeouts {
    create = var.timeout
    update = var.timeout
    delete = var.timeout
  }
}
# A persistent disk to use as the artifact store.
resource "google_compute_disk" "artifact_store" {
  name    = format("%s-%s", var.cluster_name, "artifact-store")
  project = var.project
  zone    = var.cluster_zone

  size                      = 200
  physical_block_size_bytes = 4096

  labels = {
    "application"              = "kubeflow"
    "env"                      = var.env_label
    "cloudsql-instance-suffix" = random_id.db_name_suffix.hex
    # This label will be automatically created when the disk is attached to a GKE instance.
    # We include it here to prevent Terraform deleting it.
    "goog-gke-volume" = ""
  }
}
# Daily snapshot schedule for the artifact-store disk, retaining one week of
# snapshots.
resource "google_compute_resource_policy" "artifact_store-snapshot-schedule" {
  provider   = google-beta
  name       = format("%s-%s", google_compute_disk.artifact_store.name, "snapshot-schedule")
  project    = var.project
  region     = var.cluster_region
  depends_on = [google_compute_disk.artifact_store]

  snapshot_schedule_policy {
    # Take one snapshot per day, starting at 04:00.
    schedule {
      daily_schedule {
        days_in_cycle = 1
        start_time    = "04:00"
      }
    }

    # Keep each snapshot for a week; retain snapshots per this policy even if
    # the source disk is deleted.
    retention_policy {
      max_retention_days    = 7
      on_source_disk_delete = "APPLY_RETENTION_POLICY"
    }

    snapshot_properties {
      labels = {
        "application"              = "kubeflow"
        "env"                      = var.env_label
        "cloudsql-instance-suffix" = random_id.db_name_suffix.hex
      }
    }
  }
}