diff --git a/Vagrantfile b/Vagrantfile index d1a634b..516e667 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -3,56 +3,160 @@ # Define how much memory your computer has in GB (e.g. 8, 16) # Larger nodes will be created if you have more. -RAM_SIZE = 16 +RAM_SIZE = 8 # Define how mnay CPU cores you have. # More powerful workers will be created if you have more -CPU_CORES = 8 +CPU_CORES = 4 # Internal network prefix for the VM network # See the documentation before changing this IP_NW = "192.168.56." -Vagrant.configure("2") do |config| - config.vm.box = "debian/bookworm64" - config.vm.boot_timeout = 900 - config.vm.box_check_update = true +# Calculate resource amounts +# based on RAM/CPU +ram_selector = (RAM_SIZE / 4) * 4 +if ram_selector < 8 + raise "Insufficient memory #{RAM_SIZE}GB. min 8GB" +end +RESOURCES = { + "control" => { + 1 => { + # controlplane01 bigger since it may run e2e tests. + "ram" => [ram_selector * 128, 2048].max(), + "cpu" => CPU_CORES >= 12 ? 4 : 2, + }, + 2 => { + # All additional control planes get this + "ram" => [ram_selector * 128, 2048].min(), + "cpu" => CPU_CORES > 8 ? 2 : 1, + }, + }, + "worker" => { + "ram" => [ram_selector * 128, 4096].min(), + "cpu" => CPU_CORES > 8 ? 
2 : 1, + }, +} + +# Sets up hosts file and DNS +def setup_dns(node) + # Set up /etc/hosts + node.vm.provision "setup-hosts", :type => "shell", :path => "vm/utils/setup-hosts.sh" do |s| + s.args = ["enp0s8", node.vm.hostname] + end + # Set up DNS resolution + # node.vm.provision "setup-dns", type: "shell", :path => "vm/utils/update-dns.sh" +end + +# Runs provisioning steps that are required by masters and workers +def provision_kubernetes_node(node) + # Set up kernel parameters, modules and tunables + node.vm.provision "setup-kernel", :type => "shell", :path => "vm/utils/setup-kernel.sh" + # Set up ssh + node.vm.provision "setup-ssh", :type => "shell", :path => "vm/utils/ssh.sh" + # Set up DNS + setup_dns node + # Set up with Ansible + node.vm.provision "ansible" do |ansible| + ansible.compatibility_mode = "2.0" + ansible.verbose = "v" + ansible.playbook = "ansible/provision.yml" + end +end + +# Define the number of master and worker nodes. You should not change this +NUM_CONTROL_NODES = 2 +NUM_WORKER_NODE = 2 - # Network - config.vm.hostname = PRJ_HOSTNAME - # FIXME: Change bridge adapter name and IP address according to your environment - # For exanple, I use eno1 and IP address 10.0.0.128/28 - config.vm.network "public_network", - bridge: "eno1", ip: "10.0.0.131", hostname: true +# Host address start points +MASTER_IP_START = 10 +NODE_IP_START = 20 +LB_IP_START = 30 - config.vm.network "forwarded_port", guest: 80, host: 8080, - auto_correct: true - config.vm.network "forwarded_port", guest: 443, host: 8443, - auto_correct: true +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. 
+ # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. - config.vm.synced_folder "../drupal8-website", PRJ_DIR + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + # config.vm.box = "base" + config.vm.box = "debian/bookworm64" + config.vm.boot_timeout = 900 - # Config for hypervisor - # config.vm.provider "hyperv" do |hv| - # and change all vb. to hv. - config.vm.provider "virtualbox" do |vb| - vb.name = PRJ_NAME + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + config.vm.box_check_update = false - vb.memory = "2048" - vb.cpus = "2" + # Provision Control Nodes + (1..NUM_CONTROL_NODES).each do |i| + config.vm.define "controlplane0#{i}" do |node| + # Name shown in the GUI + node.vm.provider "virtualbox" do |vb| + vb.name = "kubernetes-ha-controlplane-#{i}" + vb.memory = RESOURCES["control"][i > 2 ? 2 : i]["ram"] + vb.cpus = RESOURCES["control"][i > 2 ? 
2 : i]["cpu"] + # Disable gui & audio for headless operation in VirtualBox + vb.gui = false + vb.customize ["modifyvm", :id, "--audio", "none"] + end + node.vm.hostname = "controlplane0#{i}" + node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}" + node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}" + provision_kubernetes_node node + if i == 1 + # Add certificate verification scripts + node.vm.provision "file", source: "vm/utils/cert_verify.sh", destination: "$HOME/cert_verify.sh" + node.vm.provision "file", source: "vm/utils/approve-csr.sh", destination: "$HOME/approve-csr.sh" + end + end + end + # Provision Load Balancer Node + config.vm.define "loadbalancer" do |node| + node.vm.provider "virtualbox" do |vb| + vb.name = "kubernetes-ha-lb" + vb.memory = 512 + vb.cpus = 1 + # Disable gui & audio for headless operation in VirtualBox vb.gui = false + vb.customize ["modifyvm", :id, "--audio", "none"] end + node.vm.hostname = "loadbalancer" + node.vm.network :private_network, ip: IP_NW + "#{LB_IP_START}" + node.vm.network "forwarded_port", guest: 22, host: 2730 + # Set up ssh + node.vm.provision "setup-ssh", :type => "shell", :path => "vm/utils/ssh.sh" + setup_dns node + end - config.vm.provision "file", - source: "~/.ssh/id_ed25519.pub", destination: "~/.ssh/me.pub" - - config.vm.provision "shell", inline: <<-SHELL - sudo apt-get update - sudo apt-get install -y openssh-server - cat /home/vagrant/.ssh/me.pub >> /home/vagrant/.ssh/authorized_keys - cat /home/vagrant/.ssh/me.pub >> /root/.ssh/authorized_keys - SHELL + # Provision Worker Nodes + (1..NUM_WORKER_NODE).each do |i| + config.vm.define "node0#{i}" do |node| + node.vm.provider "virtualbox" do |vb| + vb.name = "kubernetes-ha-node-#{i}" + vb.memory = RESOURCES["worker"]["ram"] + vb.cpus = RESOURCES["worker"]["cpu"] + vb.gui = false + vb.customize ["modifyvm", :id, "--audio", "none"] + end + node.vm.hostname = "node0#{i}" + node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + 
i}" + node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}" + provision_kubernetes_node node + end + end + # Modify all nodes to not use GUI and audio + config.vm.define "all" do |node| + node.vm.provider "virtualbox" do |vb| + vb.gui = false + vb.customize ["modifyvm", :id, "--audio", "none"] + end end +end diff --git a/ansible.cfg b/ansible.cfg index 5587eb0..0249030 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -19,7 +19,7 @@ remote_tmp = "/tmp/ansible-${USER}" # Tweaks for readability and debug callbacks_enabled = profile_tasks stdout_callback = community.general.yaml -# cow_selection = bunny +cow_selection = bunny nocows = True force_color = True @@ -30,7 +30,7 @@ force_color = True # no_log = True [inventory] -enable_plugins = auto, yaml, kubernetes.core.k8s +enable_plugins = auto, yaml, kubernetes.core.k8s, ini [ssh_connection] # SSH CONFIG diff --git a/vm/Vagrantfile b/vm/Vagrantfile deleted file mode 100644 index fa40907..0000000 --- a/vm/Vagrantfile +++ /dev/null @@ -1,142 +0,0 @@ -# -*- mode: ruby -*- -# vi:set ft=ruby sw=2 ts=2 sts=2: - -# Define how much memory your computer has in GB (e.g. 8, 16) -# Larger nodes will be created if you have more. -RAM_SIZE = 16 - -# Define how mnay CPU cores you have. -# More powerful workers will be created if you have more -CPU_CORES = 8 - -# Internal network prefix for the VM network -# See the documentation before changing this -IP_NW = "192.168.56." - -# Calculate resource amounts -# based on RAM/CPU -ram_selector = (RAM_SIZE / 4) * 4 -if ram_selector < 8 - raise "Unsufficient memory #{RAM_SIZE}GB. min 8GB" -end -RESOURCES = { - "master" => { - 1 => { - # master-1 bigger since it may run e2e tests. - "ram" => [ram_selector * 128, 2048].max(), - "cpu" => CPU_CORES >= 12 ? 4 : 2, - }, - 2 => { - # All additional masters get this - "ram" => [ram_selector * 128, 2048].min(), - "cpu" => CPU_CORES > 8 ? 
2 : 1, - }, - }, - "worker" => { - "ram" => [ram_selector * 128, 4096].min(), - "cpu" => (((CPU_CORES / 4) * 4) - 4) / 4, - }, -} - -# Sets up hosts file and DNS -def setup_dns(node) - # Set up /etc/hosts - node.vm.provision "setup-hosts", :type => "shell", :path => "debian/vagrant/setup-hosts.sh" do |s| - s.args = ["enp0s8", node.vm.hostname] - end - # Set up DNS resolution - node.vm.provision "setup-dns", type: "shell", :path => "debian/update-dns.sh" -end - -# Runs provisioning steps that are required by masters and workers -def provision_kubernetes_node(node) - # Set up kernel parameters, modules and tunables - node.vm.provision "setup-kernel", :type => "shell", :path => "debian/setup-kernel.sh" - # Set up ssh - node.vm.provision "setup-ssh", :type => "shell", :path => "debian/ssh.sh" - # Set up DNS - setup_dns node - # Install cert verification script - node.vm.provision "shell", inline: "ln -s /vagrant/debian/cert_verify.sh /home/vagrant/cert_verify.sh" -end - -# Define the number of master and worker nodes. You should not change this -NUM_MASTER_NODE = 2 -NUM_WORKER_NODE = 2 - -# Host address start points -MASTER_IP_START = 10 -NODE_IP_START = 20 -LB_IP_START = 30 - -# All Vagrant configuration is done below. The "2" in Vagrant.configure -# configures the configuration version (we support older styles for -# backwards compatibility). Please don't change it unless you know what -# you're doing. -Vagrant.configure("2") do |config| - # The most common configuration options are documented and commented below. - # For a complete reference, please see the online documentation at - # https://docs.vagrantup.com. - - # Every Vagrant development environment requires a box. You can search for - # boxes at https://vagrantcloud.com/search. - # config.vm.box = "base" - config.vm.box = "debian/bookworm64" - config.vm.boot_timeout = 900 - - # Disable automatic box update checking. 
If you disable this, then - # boxes will only be checked for updates when the user runs - # `vagrant box outdated`. This is not recommended. - config.vm.box_check_update = false - - # Provision Master Nodes - (1..NUM_MASTER_NODE).each do |i| - config.vm.define "master-#{i}" do |node| - # Name shown in the GUI - node.vm.provider "virtualbox" do |vb| - vb.name = "kubernetes-ha-master-#{i}" - vb.memory = RESOURCES["master"][i > 2 ? 2 : i]["ram"] - vb.cpus = RESOURCES["master"][i > 2 ? 2 : i]["cpu"] - end - node.vm.hostname = "master-#{i}" - node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}" - node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}" - provision_kubernetes_node node - if i == 1 - # Install (opinionated) configs for vim and tmux on master-1. These used by the author for CKA exam. - node.vm.provision "file", source: "./debian/tmux.conf", destination: "$HOME/.tmux.conf" - node.vm.provision "file", source: "./debian/vimrc", destination: "$HOME/.vimrc" - end - end - end - - # Provision Load Balancer Node - config.vm.define "loadbalancer" do |node| - node.vm.provider "virtualbox" do |vb| - vb.name = "kubernetes-ha-lb" - vb.memory = 512 - vb.cpus = 1 - end - node.vm.hostname = "loadbalancer" - node.vm.network :private_network, ip: IP_NW + "#{LB_IP_START}" - node.vm.network "forwarded_port", guest: 22, host: 2730 - # Set up ssh - node.vm.provision "setup-ssh", :type => "shell", :path => "debian/ssh.sh" - setup_dns node - end - - # Provision Worker Nodes - (1..NUM_WORKER_NODE).each do |i| - config.vm.define "worker-#{i}" do |node| - node.vm.provider "virtualbox" do |vb| - vb.name = "kubernetes-ha-worker-#{i}" - vb.memory = RESOURCES["worker"]["ram"] - vb.cpus = RESOURCES["worker"]["cpu"] - end - node.vm.hostname = "worker-#{i}" - node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + i}" - node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}" - provision_kubernetes_node node - end - end -end diff --git 
a/vm/debian/ssh.sh b/vm/debian/ssh.sh deleted file mode 100644 index 476f681..0000000 --- a/vm/debian/ssh.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -# Enable password auth in sshd so we can use ssh-copy-id -sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config -systemctl restart sshd diff --git a/vm/debian/tmux.conf b/vm/debian/tmux.conf deleted file mode 100644 index 47714ae..0000000 --- a/vm/debian/tmux.conf +++ /dev/null @@ -1,50 +0,0 @@ -set -g default-shell /bin/bash -set -g mouse on -bind -n C-x setw synchronize-panes - -set -g bell-action none -set -g visual-bell off -set -g display-panes-time 2000 - -# set-window-option -g utf8 on -set -g default-terminal "screen-256color" - -# Vim-like bindings -setw -g mode-keys vi -bind "n" next-window -bind "p" previous-window -bind "C-n" next-window -bind "C-p" previous-window -bind "s" split-window -v -bind "v" split-window -h -bind "j" select-pane -D -bind "k" select-pane -U -bind "h" select-pane -L -bind "l" select-pane -R -bind -r "C-j" select-pane -D -bind -r "C-k" select-pane -U -bind -r "C-h" select-pane -L -bind -r "C-l" select-pane -R -bind "=" select-layout tiled -bind "!" break-pane - -# List of plugins -set -g @tpm_plugins ' \ - tmux-plugins/tpm \ - tmux-plugins/tmux-sensible \ - tmux-plugins/tmux-yank \ - tmux-plugins/tmux-pain-control \ - ' - -# Nord theme -set -g @plugin "arcticicestudio/nord-tmux" - -set -g @continuum-restore 'on' - -# Automatic tpm installation -if "test ! 
-d ~/.tmux/plugins/tpm" \ - "run 'git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm && ~/.tmux/plugins/tpm/bin/install_plugins'" - - -# Initialize TMUX plugin manager (keep this line at the very bottom of tmux.conf) -run '~/.tmux/plugins/tpm/tpm' diff --git a/vm/debian/vagrant/setup-hosts.sh b/vm/debian/vagrant/setup-hosts.sh deleted file mode 100644 index eef212a..0000000 --- a/vm/debian/vagrant/setup-hosts.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# -# Set up /etc/hosts so we can resolve all the machines in the VirtualBox network -set -ex -IFNAME=$1 -THISHOST=$2 -ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 |awk '{print $2}' | cut -d/ -f1)" -NETWORK=$(echo $ADDRESS | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s", $1, $2, $3) }') -sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts - -# remove ubuntu-jammy entry -sed -e '/^.*ubuntu-jammy.*/d' -i /etc/hosts -sed -e "/^.*$2.*/d" -i /etc/hosts - -# Update /etc/hosts about other hosts -cat >> /etc/hosts < - -" Keep undo history across sessions, by storing in file. 
-set undodir=/tmp/.vim/backups -set undofile diff --git a/vm/utils/approve-csr.sh b/vm/utils/approve-csr.sh new file mode 100644 index 0000000..b1289c2 --- /dev/null +++ b/vm/utils/approve-csr.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +kubectl certificate approve --kubeconfig \ + admin.kubeconfig $(kubectl get csr --kubeconfig admin.kubeconfig -o json | \ + jq -r '.items | .[] | select(.spec.username == "system:node:node02") | .metadata.name') diff --git a/vm/debian/cert_verify.sh b/vm/utils/cert_verify.sh old mode 100755 new mode 100644 similarity index 92% rename from vm/debian/cert_verify.sh rename to vm/utils/cert_verify.sh index 7fa9403..3343ac8 --- a/vm/debian/cert_verify.sh +++ b/vm/utils/cert_verify.sh @@ -8,11 +8,11 @@ FAILED='\033[0;31;1m' NC='\033[0m' # IP addresses -INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1) -MASTER_1=$(dig +short master-1) -MASTER_2=$(dig +short master-2) -WORKER_1=$(dig +short worker-1) -WORKER_2=$(dig +short worker-2) +PRIMARY_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1) +CONTROL01=$(dig +short controlplane01) +CONTROL02=$(dig +short controlplane02) +NODE01=$(dig +short node01) +NODE02=$(dig +short node02) LOADBALANCER=$(dig +short loadbalancer) LOCALHOST="127.0.0.1" @@ -76,21 +76,21 @@ SYSTEMD_KS_FILE=/etc/systemd/system/kube-scheduler.service ### WORKER NODES ### # Worker-1 cert details -WORKER_1_CERT=/var/lib/kubelet/worker-1.crt -WORKER_1_KEY=/var/lib/kubelet/worker-1.key +NODE01_CERT=/var/lib/kubelet/node01.crt +NODE01_KEY=/var/lib/kubelet/node01.key # Worker-1 kubeconfig location -WORKER_1_KUBECONFIG=/var/lib/kubelet/kubeconfig +NODE01_KUBECONFIG=/var/lib/kubelet/kubeconfig # Worker-1 kubelet config location -WORKER_1_KUBELET=/var/lib/kubelet/kubelet-config.yaml +NODE01_KUBELET=/var/lib/kubelet/kubelet-config.yaml -# Systemd worker-1 kubelet location -SYSTEMD_WORKER_1_KUBELET=/etc/systemd/system/kubelet.service +# Systemd node01 kubelet location 
+SYSTEMD_NODE01_KUBELET=/etc/systemd/system/kubelet.service -# kube-proxy worker-1 location -WORKER_1_KP_KUBECONFIG=/var/lib/kube-proxy/kubeconfig -SYSTEMD_WORKER_1_KP=/etc/systemd/system/kube-proxy.service +# kube-proxy node01 location +NODE01_KP_KUBECONFIG=/var/lib/kube-proxy/kubeconfig +SYSTEMD_NODE01_KP=/etc/systemd/system/kube-proxy.service # Function - Master node # @@ -305,8 +305,8 @@ check_systemd_etcd() exit 1 fi - if [ $IAP_URL == "https://$INTERNAL_IP:2380" ] && [ $LP_URL == "https://$INTERNAL_IP:2380" ] && [ $LC_URL == "https://$INTERNAL_IP:2379,https://127.0.0.1:2379" ] && \ - [ $AC_URL == "https://$INTERNAL_IP:2379" ] + if [ $IAP_URL == "https://$PRIMARY_IP:2380" ] && [ $LP_URL == "https://$PRIMARY_IP:2380" ] && [ $LC_URL == "https://$PRIMARY_IP:2379,https://127.0.0.1:2379" ] && \ + [ $AC_URL == "https://$PRIMARY_IP:2379" ] then printf "${SUCCESS}ETCD initial-advertise-peer-urls, listen-peer-urls, listen-client-urls, advertise-client-urls are correct\n${NC}" else @@ -349,7 +349,7 @@ check_systemd_api() SACERT="${PKI}/service-account.crt" KCCERT="${PKI}/apiserver-kubelet-client.crt" KCKEY="${PKI}/apiserver-kubelet-client.key" - if [ $ADVERTISE_ADDRESS == $INTERNAL_IP ] && [ $CLIENT_CA_FILE == $CACERT ] && [ $ETCD_CA_FILE == $CACERT ] && \ + if [ $ADVERTISE_ADDRESS == $PRIMARY_IP ] && [ $CLIENT_CA_FILE == $CACERT ] && [ $ETCD_CA_FILE == $CACERT ] && \ [ $ETCD_CERT_FILE == "${PKI}/etcd-server.crt" ] && [ $ETCD_KEY_FILE == "${PKI}/etcd-server.key" ] && \ [ $KUBELET_CERTIFICATE_AUTHORITY == $CACERT ] && [ $KUBELET_CLIENT_CERTIFICATE == $KCCERT ] && [ $KUBELET_CLIENT_KEY == $KCKEY ] && \ [ $SERVICE_ACCOUNT_KEY_FILE == $SACERT ] && [ $TLS_CERT_FILE == $APICERT ] && [ $TLS_PRIVATE_KEY_FILE == $APIKEY ] @@ -435,15 +435,15 @@ if [ ! -z "$1" ] then choice=$1 else - echo "This script will validate the certificates in master as well as worker-1 nodes. 
Before proceeding, make sure you ssh into the respective node [ Master or Worker-1 ] for certificate validation" + echo "This script will validate the certificates in master as well as node01 nodes. Before proceeding, make sure you ssh into the respective node [ Master or Worker-1 ] for certificate validation" while true do echo echo " 1. Verify certificates on Master Nodes after step 4" echo " 2. Verify kubeconfigs on Master Nodes after step 5" echo " 3. Verify kubeconfigs and PKI on Master Nodes after step 8" - echo " 4. Verify kubeconfigs and PKI on worker-1 Node after step 10" - echo " 5. Verify kubeconfigs and PKI on worker-2 Node after step 11" + echo " 4. Verify kubeconfigs and PKI on node01 Node after step 10" + echo " 5. Verify kubeconfigs and PKI on node02 Node after step 11" echo echo -n "Please select one of the above options: " read choice @@ -469,9 +469,9 @@ SUBJ_APIKC="Subject:CN=kube-apiserver-kubelet-client,O=system:masters" case $choice in 1) - if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ] + if ! [ "${HOST}" = "controlplane01" -o "${HOST}" = "controlplane02" ] then - printf "${FAILED}Must run on master-1 or master-2${NC}\n" + printf "${FAILED}Must run on controlplane01 or controlplane02${NC}\n" exit 1 fi @@ -486,7 +486,7 @@ case $choice in check_cert_and_key "apiserver-kubelet-client" $SUBJ_APIKC $CERT_ISSUER check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER - if [ "${HOST}" = "master-1" ] + if [ "${HOST}" = "controlplane01" ] then check_cert_and_key "admin" $SUBJ_ADMIN $CERT_ISSUER check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER @@ -494,9 +494,9 @@ case $choice in ;; 2) - if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ] + if ! 
[ "${HOST}" = "controlplane01" -o "${HOST}" = "controlplane02" ] then - printf "${FAILED}Must run on master-1 or master-2${NC}\n" + printf "${FAILED}Must run on controlplane01 or controlplane02${NC}\n" exit 1 fi @@ -504,16 +504,16 @@ case $choice in check_kubeconfig_exists "kube-controller-manager" $HOME check_kubeconfig_exists "kube-scheduler" $HOME - if [ "${HOST}" = "master-1" ] + if [ "${HOST}" = "controlplane01" ] then check_kubeconfig_exists "kube-proxy" $HOME fi ;; 3) - if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ] + if ! [ "${HOST}" = "controlplane01" -o "${HOST}" = "controlplane02" ] then - printf "${FAILED}Must run on master-1 or master-2${NC}\n" + printf "${FAILED}Must run on controlplane01 or controlplane02${NC}\n" exit 1 fi @@ -540,24 +540,24 @@ case $choice in ;; 4) - if ! [ "${HOST}" = "worker-1" ] + if ! [ "${HOST}" = "node01" ] then - printf "${FAILED}Must run on worker-1${NC}\n" + printf "${FAILED}Must run on node01${NC}\n" exit 1 fi CERT_LOCATION=/var/lib/kubernetes/pki check_cert_only "ca" $SUBJ_CA $CERT_ISSUER check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER - check_cert_and_key "worker-1" "Subject:CN=system:node:worker-1,O=system:nodes" $CERT_ISSUER + check_cert_and_key "node01" "Subject:CN=system:node:node01,O=system:nodes" $CERT_ISSUER check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443" check_kubeconfig "kubelet" "/var/lib/kubelet" "https://${LOADBALANCER}:6443" ;; 5) - if ! [ "${HOST}" = "worker-2" ] + if ! 
[ "${HOST}" = "node02" ] then - printf "${FAILED}Must run on worker-2${NC}\n" + printf "${FAILED}Must run on node02${NC}\n" exit 1 fi @@ -566,7 +566,7 @@ case $choice in check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER CERT_LOCATION=/var/lib/kubelet/pki - check_cert_only "kubelet-client-current" "Subject:O=system:nodes,CN=system:node:worker-2" $CERT_ISSUER + check_cert_only "kubelet-client-current" "Subject:O=system:nodes,CN=system:node:node02" $CERT_ISSUER check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443" ;; diff --git a/vm/utils/setup-hosts.sh b/vm/utils/setup-hosts.sh new file mode 100644 index 0000000..4171f31 --- /dev/null +++ b/vm/utils/setup-hosts.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# +# Set up /etc/hosts so we can resolve all the machines in the VirtualBox network +set -e +IFNAME=$1 +THISHOST=$2 + +# Host will have 3 interfaces: lo, DHCP assigned NAT network and static on VM network +# We want the VM network +PRIMARY_IP="$(ip -4 addr show | grep "inet" | egrep -v '(dynamic|127\.0\.0)' | awk '{print $2}' | cut -d/ -f1)" +NETWORK=$(echo $PRIMARY_IP | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s", $1, $2, $3) }') +#sed -e "s/^.*${HOSTNAME}.*/${PRIMARY_IP} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts + +# Export PRIMARY IP as an environment variable +echo "PRIMARY_IP=${PRIMARY_IP}" >> /etc/environment + +# Export architecture as environment variable to download correct versions of software +echo "ARCH=amd64" | sudo tee -a /etc/environment > /dev/null + +# remove ubuntu-jammy entry +# sed -e '/^.*ubuntu-jammy.*/d' -i /etc/hosts +sed -e "/^.*$2.*/d" -i /etc/hosts + +# Update /etc/hosts about other hosts +cat >> /etc/hosts < /dev/null + sh -c 'sudo apt-get install -y sshpass' &> /dev/null +fi + diff --git a/vm/debian/update-dns.sh b/vm/utils/update-dns.sh similarity index 100% rename from vm/debian/update-dns.sh rename to vm/utils/update-dns.sh diff --git a/vm/debian/vagrant/install-guest-additions.sh 
b/vm/vagrant/install-guest-additions.sh similarity index 62% rename from vm/debian/vagrant/install-guest-additions.sh rename to vm/vagrant/install-guest-additions.sh index c37d8d2..3a0c36f 100644 --- a/vm/debian/vagrant/install-guest-additions.sh +++ b/vm/vagrant/install-guest-additions.sh @@ -5,10 +5,23 @@ GUEST_ADDITION_MOUNT=/media/VBoxGuestAdditions apt-get install linux-headers-$(uname -r) build-essential dkms +# Download the VirtualBox Guest Additions ISO file wget http://download.virtualbox.org/virtualbox/${GUEST_ADDITION_VERSION}/${GUEST_ADDITION_ISO} + +# Create a directory to mount the ISO file mkdir -p ${GUEST_ADDITION_MOUNT} + +# Mount the ISO file to the specified directory with read-only option mount -o loop,ro ${GUEST_ADDITION_ISO} ${GUEST_ADDITION_MOUNT} + +# Run the VBoxLinuxAdditions installation script sh ${GUEST_ADDITION_MOUNT}/VBoxLinuxAdditions.run + +# Remove the downloaded ISO file rm ${GUEST_ADDITION_ISO} + +# Unmount the ISO file from the directory umount ${GUEST_ADDITION_MOUNT} + +# Remove the directory used for mounting rmdir ${GUEST_ADDITION_MOUNT}