forked from ReSearchITEng/kubeadm-playbook
-
Notifications
You must be signed in to change notification settings - Fork 0
/
vagrant_bridged_demo.sh
executable file
·249 lines (214 loc) · 10.3 KB
/
vagrant_bridged_demo.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
#!/usr/bin/env sh
set -e
#set -v
###
# This script mainly removes NAT from adaptor 1, and replaces it with bridged for all machines.
# If required, change the interface in the line below or pass it as a parameter: --bridged_adapter
# Default: the interface carrying the host's default route (empty if there is none).
HOST_BRIDGED_INTERFACE=$(ip route | grep default | head -1 | cut -d" " -f5)
# This is the same interface you may want to set in the Vagrantfile (or prompting you for)
# This entire script works only with virtualbox provider
# The script also modifies storage controller from IDE to SATA, and moves the disk from IDE to the newly created SATA
###
# No arguments: print usage and exit non-zero so callers notice nothing was done.
if [ $# -eq 0 ];then
cat <<EOF
Use any of these options:
--full # which does: vagrant destroy (if anything existed before), vagrant up (create machines), vagrant halt (for config), fix bridged adaptor, start machines with VBoxManage startvm, generate ansible.cfg and hosts file (ansible inventory).
--bridged_adapter <host_adapter> | auto # which does: vagrant halt (for config), fix bridged adaptor, change from IDE to SATA, start machines with VBoxManage startvm, generate ansible.cfg and hosts file (inventory).
--restart # which does only: vagrant halt and start machines with VBoxManage startvm.
--regenerate_config # which only regenerates ansible.cfg and hosts file (ansible inventory)
NOTE: ONLY ONE OPTION AT A TIME
EOF
exit 1
fi
# Optionally, when started with params like reset or init, it also does vagrant up, etc.
# Parse exactly one option (each arm 'break's out of the loop).
while [ $# -gt 0 ]; do
case $1 in
--full)
# Recreate everything from scratch; destroy may fail if nothing exists yet.
vagrant destroy -f || true
vagrant up
shift
break
;;
--bridged_adapter)
# "auto" keeps the default-route interface detected at the top of the script.
if [ "${2}x" != "autox" ]; then
HOST_BRIDGED_INTERFACE=$2
fi
shift
shift
break
;;
--restart)
ACTIONS=restart
shift
break
;;
--start)
ACTIONS=start
shift
break
;;
--regenerate_config)
ACTIONS=regenerate_config
shift
break
;;
*)
# BUG FIX: without a default arm an unrecognized option was never shifted,
# so the while loop spun forever. Fail fast instead.
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
# How to start
# NOTE(review): VAGRANT_LOG is assigned but not exported, so child vagrant
# processes may not actually see it — confirm intent.
VAGRANT_LOG=info # debug,info,warn,error
#vagrant up
# Everything below (halt, NIC/storage reconfiguration, startvm) is skipped when
# the user only asked to regenerate ansible.cfg and the hosts inventory.
if [ "${ACTIONS}x" != "regenerate_configx" ];then
# Machines must be powered off before VBoxManage modifyvm/storagectl can run,
# except for a plain --start, which expects them already halted.
if [ "${ACTIONS}x" != "startx" ];then
### Stop all machines for reconfiguration and/or restart, but not when we only want to regenerate config
for runningVagrM in $(vagrant status | grep 'running (virtualbox)' | cut -d" " -f1); do
echo "going to run: vagrant halt -f $runningVagrM "
vagrant halt -f $runningVagrM
done
fi
### Get list of all already powered off machines:
echo "Getting the list of already powered off machines"
filter_machines_offvagm=$(vagrant status | grep 'poweroff (virtualbox)' | cut -d" " -f1)
# Nothing powered off means nothing to reconfigure/start: bail out.
if [ "${filter_machines_offvagm}x" = "x" ]; then
echo "There is no machine to manage, exit now"
exit 1 # && error_now
else
echo "list of already powered off machines: ${filter_machines_offvagm}"
fi
### Get list of machines created for local Vagrantfile in the current directory
# The current directory name is part of VirtualBox VM names created by vagrant;
# it is used below to filter VBoxManage output to this project's VMs only.
filter_machines_local_directory=$(basename `pwd`) # note: cannot run it from /
### If the only request is restart, do it and exit
if [ "${ACTIONS}x" = "restartx" -o "${ACTIONS}x" = "startx" ];then
echo "### startng VMs ${filter_machines_offvagm}"
for vagrantM in ${filter_machines_offvagm}; do
#for vboxM in $(VBoxManage list vms | grep -v inaccessible | grep $vagrantM | grep $filter_machines_local_directory | cut -d'"' -f2 ); do
# Map each vagrant machine name to its VirtualBox UUID ({...} stripped).
# NOTE(review): plain grep here can over-match if one machine name is a
# substring of another — confirm names are unambiguous in this setup.
for vboxMUUID in $(VBoxManage list vms | grep -v inaccessible | grep $vagrantM | grep $filter_machines_local_directory | cut -d'{' -f2 | tr -d '}' ); do #UsesUUID
VBoxManage startvm $vboxMUUID --type headless # DO NOT USE 'vagrant up', use VBoxManage startvm command
done
done
vagrant status
echo "Start vm triggered (via VBoxManage startvm). Once up, proceed with login using ssh -F ssh_config <host>; to check status use: vagrant status"
exit 0
fi
###
echo "### Reconfiguring machines ${filter_machines_offvagm}"
# Fallback when neither the default route nor --bridged_adapter provided an
# interface: take the first interface from the routing table.
if [ "${HOST_BRIDGED_INTERFACE}x" = "x" ]; then
HOST_BRIDGED_INTERFACE=`ip route | head -1 | cut -d" " -f5`
echo "!WARNING!: There was no interface provided, and is no default interface on this machine. Going to use: $HOST_BRIDGED_INTERFACE"
fi
#set -vx
for vagrantM in ${filter_machines_offvagm}; do
# VirtualBox VM names drop dots from the vagrant machine name; match on that.
vagrantM_nodot=$(echo $vagrantM| tr -d ".")
#for vboxM in $(VBoxManage list vms | grep -v inaccessible | grep $vagrantM | grep $filter_machines_local_directory | cut -d'"' -f2 ); do #Uses names
for vboxMUUID in $(VBoxManage list vms | grep -v inaccessible | grep $vagrantM_nodot | grep $filter_machines_local_directory | cut -d'{' -f2 | tr -d '}' ); do #UsesUUID
echo "Modifying the interfaces, disabling the NAT and making first interface bridged with for vagrantM=$vagrantM (vboxMUUID=$vboxMUUID)"
#### Change VM's network interfaces, NAT to bridged:
#VBoxManage showvminfo $M | grep -i nic #"--machinereadable"
# First wipe all 8 NICs, then recreate NIC1 as bridged/virtio on the chosen
# host interface with a fresh MAC (each VM must get a unique MAC).
VBoxManage modifyvm $vboxMUUID --nic1 none --nic2 none --nic3 none --nic4 none --nic5 none --nic6 none --nic7 none --nic8 none
VBoxManage modifyvm $vboxMUUID --nic1 bridged --bridgeadapter1 $HOST_BRIDGED_INTERFACE --nictype1 virtio --macaddress1 auto
#VBoxManage modifyvm $vboxMUUID --nic1 bridged --bridgeadapter1 $HOST_BRIDGED_INTERFACE --nictype1 Am79C973 --macaddress1 auto
#VBoxManage modifyvm $vboxMUUID --nic1 bridged --bridgeadapter1 $HOST_BRIDGED_INTERFACE --nictype1 virtio --macaddress1 auto
#VBoxManage modifyvm $vboxMUUID --nic1 bridged --bridgeadapter1 $HOST_BRIDGED_INTERFACE --nictype1 82540EM --macaddress1 auto
#VBoxManage modifyvm $vboxMUUID --nic2 nat --nictype2 82540EM --macaddress2 auto --natnet2 "10.0.2.0/24" --natpf2 "ssh,tcp,127.0.0.1,2222,,22" --natdnsproxy2 off --natdnshostresolver2 off # This is optional
#### (optional but recommended), change disk IDE TO SATA". Centos comes by default with unperformant controller: IDE (not SATA/SCSI/etc)
echo "Modifying the controller from IDE to SATA for vagrantM=$vagrantM (vboxMUUID=$vboxMUUID)"
# Get disk
#IDE_VMDK_PATH=$(VBoxManage showvminfo --machinereadable $vboxMUUID | grep -i "IDE" | grep '\.vmdk' | cut -d '"' -f4)
# Find the disk image currently attached to an IDE controller (empty when the
# VM already uses SATA, in which case the migration below is skipped).
IDE_VMDK_ImageUUID=$(VBoxManage showvminfo --machinereadable $vboxMUUID | grep -i "IDE" | grep ImageUUID | cut -d '"' -f4)
if [ "${IDE_VMDK_ImageUUID}x" != "x" ]; then
echo "Changind disk from IDE to SATA for disk IDE_VMDK_ImageUUID=$IDE_VMDK_ImageUUID "
# The controller may be named either "IDE Controller" or "IDE" depending on
# the box; try removing both, ignoring the one that does not exist.
VBoxManage storagectl $vboxMUUID --name "IDE Controller" --remove || true # remove IDE controller
VBoxManage storagectl $vboxMUUID --name "IDE" --remove || true # remove IDE controller
VBoxManage storagectl $vboxMUUID --name "SATA" --add sata --portcount 3 --hostiocache on --bootable on # Add SATA controller
VBoxManage storageattach $vboxMUUID --storagectl "SATA" --port 0 --type hdd --nonrotational on --medium $IDE_VMDK_ImageUUID # Attach the previous disk to the new SATA controller
#For SSD optionally add also: "--nonrotational on"
fi
#### Start the VM:
VBoxManage startvm $vboxMUUID --type headless # DO NOT USE 'vagrant up', use VBoxManage startvm command
done
done
###
echo " Machines were reconfigured and restarted. This is the vagrant status for virtuabox machines:"
vagrant status | grep '(virtualbox)'
fi # Up to here we did actions when if [ "${ACTIONS}" != "regenerate_configx" ]
###
echo "### Generating the list of machines which are up:"
all_runningVagrMs=$(vagrant status | grep 'running (virtualbox)' | cut -d" " -f1)
echo " List of already started machines: $all_runningVagrMs" | tr '\n' ' '
echo ""
###
echo "### (re)Generating a ssh_config to be used by ssh (partially reusing vagrant generated ssh keys and config)"
rm -f ssh_config
# Reuse vagrant's per-machine ssh config, but: wildcard the Host entry (so both
# short and FQDN names match), drop HostName (DNS/bridged IP is used instead),
# force port 22 (bridged, not NAT-forwarded) and user root.
for runningVagrM in $all_runningVagrMs ; do
vagrant ssh-config "$runningVagrM" | sed "s|^Host .*|Host ${runningVagrM}\*|g" | sed "/^ *HostName .*/d" | sed "s|^ *Port .*| Port 22|g" | sed "s|^ *User .*| User root|g" >>ssh_config
done
###
#When below is enabled, ansible won't be able to run
#echo "### update ~vagrant/.ssh/authorized_keys inside each machine to automatically switch to root user (instead of vagrant) "
#set -vx
#for runningVagrM in $all_runningVagrMs ; do
# ssh -F ./ssh_config $runningVagrM sed -i \'s#^ssh-#command=\"sudo -iu root\" ssh-#g\' ~vagrant/.ssh/authorized_keys
#done
#set +vx
###
echo "### Creating list of machines with FQDN"
runningVagrM_FQDN=""
# The second field of ping's first output line is the resolved name.
# NOTE(review): this parsing assumes the ping banner format "PING <name> (<ip>)"
# — confirm on the target OS; a failing ping contributes nothing to the list.
for runningVagrM in $all_runningVagrMs ; do
runningVagrM_FQDN="${runningVagrM_FQDN} $(ping -c 1 "${runningVagrM}" | head -1 | cut -d " " -f2)"
done
echo " List of FQDN for the vagrant machines: $runningVagrM_FQDN"
###
# BUG FIX: both variables are space-separated word lists; the old unquoted
# 'echo $var | wc -l' collapsed them to a single line, so both counts were
# always 1 and the mismatch warning below could never trigger. Count words.
number_hosts_nonfqdn=$(echo $all_runningVagrMs | wc -w)
number_hosts_fqdn=$(echo $runningVagrM_FQDN | wc -w)
if [ $number_hosts_nonfqdn -ne $number_hosts_fqdn ]; then
echo "!WARNING!: FQDN is not properly set. Trying without..."
use_FQDN=0
all_runningVagrMs_postfqdn=$all_runningVagrMs
else
use_FQDN=1
all_runningVagrMs_postfqdn=$runningVagrM_FQDN
fi
echo " To ssh into any of the machines, run like this: "
echo ""
#for M in ${all_runningVagrMs} ; do
for M in ${all_runningVagrMs_postfqdn} ; do
echo "ssh -F ./ssh_config $M "
done
echo
###
# Write a project-local ansible.cfg that points ansible at the ssh_config
# generated above and enables privilege escalation to root.
echo "### Creating an ./ansible.cfg to based on the above ssh_config file and some more options, for ansible to be able to login "
# Quoted delimiter: the config body contains no shell expansions, keep it literal.
cat >ansible.cfg <<'ANSIBLE_CFG'
[defaults]
#remote_user=vagrant
become=true
become_method=sudo
stdout_callback = debug
[ssh_connection]
ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -F ./ssh_config
pipelining = True
ANSIBLE_CFG
echo " a local ./ansible.cfg has been generated with success. Contents:"
cat ansible.cfg
printf '\n'
###
echo "### Generating (guessing) inventory file based on host names (master must have word master in its name)"
echo "[master]" > hosts
#echo $all_runningVagrMs | tr ' ' '\n' | grep "master" >> hosts
# BUG FIX: filter out empty lines (an empty machine list used to put a blank
# line into hosts and make number_nodes=1); '|| true' keeps a no-match grep
# (exit status 1) from killing the script under the file-level 'set -e'.
echo $all_runningVagrMs_postfqdn | tr ' ' '\n' | grep -v '^$' | grep "master" >> hosts || true
echo "[node]" >> hosts
#echo $all_runningVagrMs | tr ' ' '\n' | grep -v "master" >> hosts
echo $all_runningVagrMs_postfqdn | tr ' ' '\n' | grep -v '^$' | grep -v "master" >> hosts || true
# Count real (non-empty) node entries to decide the master-as-node fallback.
number_nodes=$( echo $all_runningVagrMs_postfqdn | tr ' ' '\n' | grep -v '^$' | grep -v "master" | wc -l )
if [ $number_nodes -lt 1 ]; then
echo "no nodes were detected, so master will be also a node"
echo $all_runningVagrMs_postfqdn | tr ' ' '\n' | grep -v '^$' | grep "master" >> hosts || true
fi
echo
echo "### The autogenerated inventory (./hosts file) looks like this:"
cat hosts
cat <<EOF
### Vagrant should be up.
You may now proceed with reviewing configuration:
vi group_vars/all
and then run ansible playbooks like site.yml
ansible-playbook -i hosts -v site.yml
EOF