forked from edwardcallahan/conductr-ansible
create-private-agent-network-ec2.yml
---
#
# Create a VPC network, subnets, and related resources for use with build-private-agent-cluster.yml
# Launch a bastion host into the VPC for running build-private-agent-cluster.yml with the required private network access
#
- name: Create Lightbend EC2 network w/ Private Agents
hosts: localhost
connection: local
gather_facts: False
# This image is Ubuntu 16.04 LTS amd64 hvm:ebs-ssd in us-east-1
# For updates and other regions https://cloud-images.ubuntu.com/locator/ec2/
#
# BASTION_IMAGE must be changed to a region-local Ubuntu AMI for any other region
#
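# A minimal invocation sketch (KEYPAIR has no default here and must be supplied; the AMI
# and region overrides below are illustrative placeholder values, not tested ones):
#   ansible-playbook create-private-agent-network-ec2.yml \
#     -e "KEYPAIR=my-keypair" -e "BASTION_IMAGE=ami-xxxxxxxx" -e "EC2_REGION=us-west-2"
#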
vars:
BASTION_IMAGE: "ami-cd0f5cb6"
BASTION_INSTANCE_TYPE: "t2.large"
BASTION_VOL_TYPE: "gp2"
BASTION_VOL_SIZE: 100
EC2_REGION: us-east-1
admin_name: "Lightbend Admin Access"
core_name: "Lightbend Core Node"
ingress_name: "Lightbend ELB Ingress"
private_agent_name: "Lightbend Private Agent"
public_agent_name: "Lightbend Public Agent"
tasks:
- name: Create VPC
local_action:
module: ec2_vpc
cidr_block: 10.100.0.0/16
resource_tags:
Name: "Lightbend Enterprise Suite Cluster VPC"
region: "{{ EC2_REGION }}"
dns_hostnames: yes
dns_support: yes
internet_gateway: True
route_tables:
- subnets:
- 10.100.1.0/24
- 10.100.3.0/24
- 10.100.5.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
subnets:
- cidr: 10.100.1.0/24
az: "{{ EC2_REGION }}a"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-A Public SN"
- cidr: 10.100.3.0/24
az: "{{ EC2_REGION }}b"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-B Public SN"
# For regions with only two AZs, reuse 'a' or 'b' for this third subnet
- cidr: 10.100.5.0/24
az: "{{ EC2_REGION }}c"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-C Public SN"
state: present
register: vpc
- name: Create NAT Gateway
ec2_vpc_nat_gateway:
state: present
subnet_id: "{{ vpc.subnets[0].id }}"
wait: yes
region: "{{ EC2_REGION }}"
if_exist_do_not_create: true
register: nat_gateway
- name: Create Private Subnet A
ec2_vpc_subnet:
state: present
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ EC2_REGION }}"
cidr: 10.100.2.0/24
az: "{{ EC2_REGION }}a"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-A Private SN"
register: private_SN_A
- name: Create Private Subnet B
ec2_vpc_subnet:
state: present
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ EC2_REGION }}"
cidr: 10.100.4.0/24
az: "{{ EC2_REGION }}b"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-B Private SN"
register: private_SN_B
- name: Create Private Subnet C
ec2_vpc_subnet:
state: present
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ EC2_REGION }}"
cidr: 10.100.6.0/24
az: "{{ EC2_REGION }}c"
resource_tags:
Name: "Lightbend {{ EC2_REGION }}-C Private SN"
register: private_SN_C
- name: Set up NAT-protected route table
ec2_vpc_route_table:
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ EC2_REGION }}"
subnets:
- "Lightbend {{ EC2_REGION }}-A Private SN"
- "Lightbend {{ EC2_REGION }}-B Private SN"
- "Lightbend {{ EC2_REGION }}-C Private SN"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- name: Create Admin SG
local_action:
module: ec2_group
name: "{{ admin_name }}"
description: "{{ admin_name }}"
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ vpc.vpc.region }}"
state: present
register: admin_sg
- name: Create Ingress SG
local_action:
module: ec2_group
name: "{{ ingress_name }}"
description: "{{ ingress_name }}"
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ vpc.vpc.region }}"
state: present
register: ingress_sg
- pause:
prompt: "Pausing so the newly created security groups propagate before rules reference them"
seconds: 5
- name: Create Public Agent SG
local_action:
module: ec2_group
name: "{{ public_agent_name }}"
description: "{{ public_agent_name }}"
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ vpc.vpc.region }}"
register: public_agent_sg
- name: Create Private Agent SG
local_action:
module: ec2_group
name: "{{ private_agent_name }}"
description: "{{ private_agent_name }}"
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ vpc.vpc.region }}"
state: present
register: private_agent_sg
- name: Create Core Nodes SG
local_action:
module: ec2_group
name: "{{ core_name }}"
description: "{{ core_name }}"
vpc_id: "{{ vpc.vpc_id }}"
region: "{{ vpc.vpc.region }}"
state: present
register: core_sg
- name: Admin SG Rules
local_action:
module: ec2_group
name: "{{ admin_name }}"
description: "{{ admin_name }}"
region: "{{ vpc.vpc.region }}"
rules:
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 0.0.0.0/0
- name: Ingress SG Rules
local_action:
module: ec2_group
name: "{{ ingress_name }}"
description: "{{ ingress_name }}"
region: "{{ vpc.vpc.region }}"
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 443
to_port: 443
cidr_ip: 0.0.0.0/0
- name: Public Agents SG Rules
local_action:
module: ec2_group
name: "{{ public_agent_name }}"
description: "{{ public_agent_name }}"
region: "{{ vpc.vpc.region }}"
rules:
# Admin SSH
- proto: tcp
from_port: 22
to_port: 22
group_id: "{{ admin_sg.group_id}}"
# Remoting
- proto: tcp
from_port: 2552
to_port: 2552
group_id: "{{ core_sg.group_id}}"
# ELB health check
- proto: tcp
from_port: 9009
to_port: 9009
group_id: "{{ ingress_sg.group_id }}"
# Visualizer
- proto: tcp
from_port: 9999
to_port: 9999
group_id: "{{ ingress_sg.group_id }}"
- name: Private Agents SG Rules
local_action:
module: ec2_group
name: "{{ private_agent_name }}"
description: "{{ private_agent_name }}"
region: "{{ vpc.vpc.region }}"
rules:
# Admin SSH
- proto: tcp
from_port: 22
to_port: 22
group_id: "{{ admin_sg.group_id}}"
# Akka Remoting
- proto: tcp
from_port: 2552
to_port: 2552
group_id: "{{ core_sg.group_id}}"
# Proxy access from ConductR core nodes
- proto: tcp
from_port: 10000
to_port: 10999
group_id: "{{ core_sg.group_id }}"
# Proxy access from ConductR private agents
- proto: tcp
from_port: 10000
to_port: 10999
group_id: "{{ private_agent_sg.group_id }}"
# Proxy access from ConductR public agents
- proto: tcp
from_port: 10000
to_port: 10999
group_id: "{{ public_agent_sg.group_id }}"
- name: Core Nodes SG Rules
local_action:
module: ec2_group
name: "{{ core_name }}"
description: "{{ core_name }}"
region: "{{ vpc.vpc.region }}"
rules:
# Admin SSH
- proto: tcp
from_port: 22
to_port: 22
group_id: "{{ admin_sg.group_id}}"
# Akka Remoting, Control Protocol, Bundle Stream Server,
# Status Server, and Service Locator (ports 9004-9008)
- proto: tcp
from_port: 9004
to_port: 9008
group_id: "{{ public_agent_sg.group_id }}"
- proto: tcp
from_port: 9004
to_port: 9008
group_id: "{{ private_agent_sg.group_id }}"
- proto: tcp
from_port: 9004
to_port: 9008
group_id: "{{ core_sg.group_id }}"
- name: Launch Bastion Node
local_action:
module: ec2
image: "{{ BASTION_IMAGE }}"
instance_type: "{{ BASTION_INSTANCE_TYPE }}"
keypair: "{{ KEYPAIR }}"
region: "{{ EC2_REGION }}"
group_id: "{{ admin_sg.group_id }}"
vpc_subnet_id: "{{ vpc.subnets[0].id }}"
assign_public_ip: yes
instance_tags:
Name: "Lightbend Admin Bastion"
Role: Bastion Node
count: 1
volumes:
- device_name: /dev/sda1
device_type: "{{ BASTION_VOL_TYPE }}"
volume_size: "{{ BASTION_VOL_SIZE }}"
delete_on_termination: true
wait: yes
register: ec2
- name: Add to bastion_public
add_host:
groupname: "bastion_public"
hostname: "{{ ec2.instances[0].public_ip }}"
- name: Wait for SSH to come up
wait_for:
host: "{{ groups.bastion_public[0] }}"
port: 22
delay: 60
timeout: 320
state: started
- name: Create ELB
local_action:
module: ec2_elb_lb
name: "Lightbend-ELB-{{ EC2_REGION }}"
scheme: internet-facing
security_group_ids: "{{ ingress_sg.group_id }}"
state: present
cross_az_load_balancing: yes
region: "{{ EC2_REGION }}"
subnets:
- "{{ vpc.subnets[0].id }}"
- "{{ vpc.subnets[1].id }}"
- "{{ vpc.subnets[2].id }}"
listeners:
# Upload a cert to use SSL
# Example listener for Visualizer 80 -> 9999
- protocol: http
load_balancer_port: 80
instance_port: 9999
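# A sketch of an HTTPS listener once a certificate has been uploaded; the certificate
# ARN below is a placeholder, not a real certificate:
# - protocol: https
#   load_balancer_port: 443
#   instance_port: 9999
#   ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/my-cert"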
health_check:
ping_protocol: http
ping_port: 9009
ping_path: /status
response_timeout: 5
interval: 30
unhealthy_threshold: 2
healthy_threshold: 3
register: elb
- debug: msg="ELB DNS name {{ elb.elb.dns_name }}"
- debug: msg="Add listeners to {{ elb.elb.dns_name }} to expose bundle endpoints"
- debug: msg="Upload x.509 certificate to ELB for SSL endpoints"
- name: Create vars file
template:
src: templates/vars-private-agent.j2
dest: "vars/{{ EC2_REGION }}_private_agent_vars.yml"
- debug: msg="Vars file vars/{{ EC2_REGION }}_private_agent_vars.yml created"
- name: Setup Bastion
hosts: bastion_public
user: "ubuntu"
gather_facts: False
sudo: True
vars:
REMOTE_USER: "ubuntu"
vars_files:
- "vars/{{ EC2_REGION }}_private_agent_vars.yml"
tasks:
- include: python/tasks/main.yml
- include: ntp/tasks/main.yml
- include: ansible/tasks/setup-ansible.yml
- include: conductr/tasks/install-cli-pip.yml
- include: java/tasks/openjdk.yml
- include: system/tasks/update-ubuntu.yml
# Copy to Bastion to complete:
# * ConductR and ConductR-Agent systemd deb files
# * Private PEM key for nodes
# * .dockercfg for nodes, if needed
# * lightbend commercial.credentials
# * access token
# * vars file from network creation
#
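# For example, from the workstation that ran this play (paths and the destination
# directory are placeholders; adjust to wherever the playbooks live on the bastion):
#   scp -i /path/to/keypair.pem /path/to/keypair.pem ubuntu@<bastion-public-ip>:~/
#   scp -i /path/to/keypair.pem vars/us-east-1_private_agent_vars.yml ubuntu@<bastion-public-ip>:~/conductr-ansible/vars/
#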
# Export to Env
# * AWS Keys
# * Source Ansible hacking/env-setup
# * Disable host checking with export ANSIBLE_HOST_KEY_CHECKING=False
#
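# For example, in the bastion shell (key values are placeholders; the env-setup path
# assumes an Ansible source checkout as noted above):
#   export AWS_ACCESS_KEY_ID=<access key>
#   export AWS_SECRET_ACCESS_KEY=<secret key>
#   source /path/to/ansible/hacking/env-setup
#   export ANSIBLE_HOST_KEY_CHECKING=False
#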
# Run build cluster play from bastion
# Build Cluster `nohup ansible-playbook build-private-agent-cluster-ec2.yml -e "VARS_FILE=vars/{{EC2_REGION}}_private_agent_vars.yml" --private-key /path/to/{{keypair}} > build_private_cluster.out &`
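# To follow progress, tail the log that the command above writes (assumes the same working directory):
#   tail -f build_private_cluster.out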