diff --git a/ansible/kubernetes/inventory/group_vars/all/k3s.yml b/ansible/kubernetes/inventory/group_vars/all/k3s.yml
index ccedf04f2..ace84169b 100644
--- a/ansible/kubernetes/inventory/group_vars/all/k3s.yml
+++ b/ansible/kubernetes/inventory/group_vars/all/k3s.yml
@@ -1,13 +1,13 @@
 ---
 # renovate: datasource=github-releases depName=k3s-io/k3s
-#k3s_release_version: "v1.27.4+k3s1"
-k3s_release_version: "v1.24.8+k3s1"
+k3s_release_version: "v1.27.4+k3s1"
+#k3s_release_version: "v1.24.8+k3s1"
 k3s_install_hard_links: true
 k3s_become: true
 k3s_debug: false
 k3s_etcd_datastore: true
-k3s_registration_address: 192.168.3.30 # dev
-k3s_use_unsupported_config: true
+k3s_registration_address: 192.168.4.11 # dev
+#k3s_use_unsupported_config: true
 #k3s_registries:
 #  mirrors:
 #    "docker.io":
diff --git a/ansible/kubernetes/inventory/group_vars/master/k3s.yml b/ansible/kubernetes/inventory/group_vars/master/k3s.yml
index 30ef70f5b..a9fb289c8 100644
--- a/ansible/kubernetes/inventory/group_vars/master/k3s.yml
+++ b/ansible/kubernetes/inventory/group_vars/master/k3s.yml
@@ -15,15 +15,8 @@ k3s_server:
   cluster-cidr: "10.42.0.0/16"
   service-cidr: "10.43.0.0/16"
   etcd-expose-metrics: true
-  kubelet-arg:
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "node-status-update-frequency=4s"
   kube-controller-manager-arg:
     - "bind-address=0.0.0.0"
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "node-monitor-period=4s"
-    - "node-monitor-grace-period=16s"
-    - "pod-eviction-timeout=20s"
   kube-proxy-arg:
     - "metrics-bind-address=0.0.0.0"
   kube-scheduler-arg:
@@ -33,6 +26,3 @@ k3s_server:
     - "--audit-policy-file=/var/lib/rancher/k3s/audit-policy.yaml"
     - "--audit-log-maxage=2"
     - "anonymous-auth=true"
-    # https://github.com/k3s-io/k3s/issues/1264
-    - "default-not-ready-toleration-seconds=20"
-    - "default-unreachable-toleration-seconds=20"
diff --git a/ansible/kubernetes/inventory/hosts_dev.yml b/ansible/kubernetes/inventory/hosts_dev.yml
index dc70adc6d..379320142 100644
--- a/ansible/kubernetes/inventory/hosts_dev.yml
+++ b/ansible/kubernetes/inventory/hosts_dev.yml
@@ -9,13 +9,17 @@ kubernetes:
       hosts:
         k3s-cp-00:
           ansible_host: 192.168.3.30
-    worker:
-      vars:
-        ansible_user: yin
-        ansible_ssh_port: 22
-        ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
-      hosts:
-        k3s-worker-00:
+        k3s-cp-01:
           ansible_host: 192.168.3.31
-        k3s-worker-01:
+        k3s-cp-02:
           ansible_host: 192.168.3.32
+#    worker:
+#      vars:
+#        ansible_user: yin
+#        ansible_ssh_port: 22
+#        ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+#      hosts:
+#        k3s-worker-00:
+#          ansible_host: 192.168.3.30
+#        k3s-worker-01:
+#          ansible_host: 192.168.3.32
diff --git a/ansible/onedr0p/.envrc b/ansible/onedr0p/.envrc
new file mode 100644
index 000000000..a3eca56fa
--- /dev/null
+++ b/ansible/onedr0p/.envrc
@@ -0,0 +1,8 @@
+# shellcheck disable=SC2148,SC2155
+export SOPS_AGE_KEY_FILE="$(expand_path ../../age.key)"
+export VIRTUAL_ENV="$(expand_path ../../.venv)"
+export ANSIBLE_COLLECTIONS_PATH="$(expand_path ../../.venv/galaxy)"
+export ANSIBLE_ROLES_PATH="$(expand_path ../../.venv/galaxy/ansible_roles)"
+export ANSIBLE_VARS_ENABLED="host_group_vars,community.sops.sops"
+export ANSIBLE_INVENTORY="$(expand_path ./inventory/hosts.yaml)"
+PATH_add "$(expand_path ../../.venv/bin)"
diff --git a/ansible/onedr0p/inventory/group_vars/all/main.yaml b/ansible/onedr0p/inventory/group_vars/all/main.yaml
new file mode 100644
index 000000000..2b31fde3f
--- /dev/null
+++ b/ansible/onedr0p/inventory/group_vars/all/main.yaml
@@ -0,0 +1,22 @@
+---
+# renovate: datasource=github-releases depName=k3s-io/k3s
+k3s_release_version: "v1.27.4+k3s1"
+k3s_install_hard_links: true
+k3s_become: true
+k3s_etcd_datastore: true
+k3s_registration_address: 192.168.3.30
+k3s_use_unsupported_config: true
+# /var/lib/rancher/k3s/server/manifests
+k3s_server_manifests_urls:
+  # Kube-vip RBAC
+  - url: https://raw.githubusercontent.com/kube-vip/kube-vip/main/docs/manifests/rbac.yaml
+    filename: kube-vip-rbac.yaml
+  # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release)
+  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+    filename: custom-prometheus-podmonitors.yaml
+  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+    filename: custom-prometheus-prometheusrules.yaml
+  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
+    filename: custom-prometheus-scrapeconfigs.yaml
+  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+    filename: custom-prometheus-servicemonitors.yaml
diff --git a/ansible/onedr0p/inventory/group_vars/all/supplemental.yaml b/ansible/onedr0p/inventory/group_vars/all/supplemental.yaml
new file mode 100644
index 000000000..ff7c58977
--- /dev/null
+++ b/ansible/onedr0p/inventory/group_vars/all/supplemental.yaml
@@ -0,0 +1,3 @@
+---
+github_username: leanxia
+timezone: America/Los_Angeles
diff --git a/ansible/onedr0p/inventory/group_vars/master/main.yaml b/ansible/onedr0p/inventory/group_vars/master/main.yaml
new file mode 100644
index 000000000..8f3b2cbc7
--- /dev/null
+++ b/ansible/onedr0p/inventory/group_vars/master/main.yaml
@@ -0,0 +1,28 @@
+---
+k3s_control_node: true
+k3s_server:
+  node-ip: "{{ ansible_host }}"
+  tls-san:
+    - "{{ k3s_registration_address }}"
+  https-listen-port: 6443
+  docker: false
+  flannel-backend: "none" # must be quoted
+  disable:
+    - coredns
+    - local-storage
+    - metrics-server
+    - servicelb
+    - traefik
+  disable-network-policy: true
+  disable-cloud-controller: true
+  disable-kube-proxy: true
+  cluster-cidr: 10.32.0.0/16
+  service-cidr: 10.33.0.0/16
+  write-kubeconfig-mode: "0644"
+  etcd-expose-metrics: true
+  kube-controller-manager-arg:
+    - bind-address=0.0.0.0
+  kube-scheduler-arg:
+    - bind-address=0.0.0.0
+  kube-apiserver-arg:
+    - anonymous-auth=true
diff --git a/ansible/onedr0p/inventory/group_vars/worker/main.yaml b/ansible/onedr0p/inventory/group_vars/worker/main.yaml
new file mode 100644
index 000000000..5fdb4638a
--- /dev/null
+++ b/ansible/onedr0p/inventory/group_vars/worker/main.yaml
@@ -0,0 +1,4 @@
+---
+k3s_control_node: false
+k3s_agent:
+  node-ip: "{{ ansible_host }}"
diff --git a/ansible/onedr0p/inventory/hosts_dev.yml b/ansible/onedr0p/inventory/hosts_dev.yml
new file mode 100644
index 000000000..dc70adc6d
--- /dev/null
+++ b/ansible/onedr0p/inventory/hosts_dev.yml
@@ -0,0 +1,21 @@
+---
+kubernetes:
+  children:
+    master:
+      vars:
+        ansible_user: yin
+        ansible_ssh_port: 22
+        ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+      hosts:
+        k3s-cp-00:
+          ansible_host: 192.168.3.30
+    worker:
+      vars:
+        ansible_user: yin
+        ansible_ssh_port: 22
+        ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+      hosts:
+        k3s-worker-00:
+          ansible_host: 192.168.3.31
+        k3s-worker-01:
+          ansible_host: 192.168.3.32
diff --git a/ansible/onedr0p/playbooks/cluster-ceph-reset.yaml b/ansible/onedr0p/playbooks/cluster-ceph-reset.yaml
new file mode 100644
index 000000000..7acd285c5
--- /dev/null
+++ b/ansible/onedr0p/playbooks/cluster-ceph-reset.yaml
@@ -0,0 +1,39 @@
+---
+- name: Reset Ceph Drives
+  hosts: all
+  become: true
+  gather_facts: true
+  any_errors_fatal: true
+  pre_tasks:
+    - name: Pausing for 2 seconds...
+      ansible.builtin.pause:
+        seconds: 2
+  tasks:
+    - name: Reset Ceph Drives # noqa: ignore-errors
+      ignore_errors: true
+      when: ceph_drives | default([]) | length > 0
+      block:
+        - name: Delete (/var/lib/rook)
+          ansible.builtin.file:
+            state: absent
+            path: /var/lib/rook
+        - name: Delete (/dev/mapper/ceph-*) # noqa: no-changed-when
+          ansible.builtin.shell: |
+            set -o pipefail
+            ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove_all --force % || true
+        - name: Delete (/dev/ceph-*) # noqa: no-changed-when
+          ansible.builtin.shell: rm -rf /dev/ceph-*
+        - name: Delete (/dev/mapper/ceph--*) # noqa: no-changed-when
+          ansible.builtin.shell: rm -rf /dev/mapper/ceph--*
+        - name: Wipe (sgdisk) # noqa: no-changed-when
+          ansible.builtin.command: "sgdisk --zap-all {{ item }}"
+          loop: "{{ ceph_drives }}"
+        - name: Wipe (dd) # noqa: no-changed-when
+          ansible.builtin.command: "dd if=/dev/zero of={{ item }} bs=1M count=100 oflag=direct,dsync"
+          loop: "{{ ceph_drives }}"
+        - name: Wipe (blkdiscard) # noqa: no-changed-when
+          ansible.builtin.command: "blkdiscard {{ item }}"
+          loop: "{{ ceph_drives }}"
+        - name: Wipe (partprobe) # noqa: no-changed-when
+          ansible.builtin.command: "partprobe {{ item }}"
+          loop: "{{ ceph_drives }}"
diff --git a/ansible/onedr0p/playbooks/cluster-installation.yaml b/ansible/onedr0p/playbooks/cluster-installation.yaml
new file mode 100644
index 000000000..682eb1dae
--- /dev/null
+++ b/ansible/onedr0p/playbooks/cluster-installation.yaml
@@ -0,0 +1,69 @@
+---
+- name: Cluster Installation
+  hosts: all
+  become: true
+  gather_facts: true
+  any_errors_fatal: true
+  pre_tasks:
+    - name: Pausing for 2 seconds...
+      ansible.builtin.pause:
+        seconds: 2
+  tasks:
+    - name: Check if cluster is installed
+      check_mode: false
+      ansible.builtin.stat:
+        path: /etc/rancher/k3s/config.yaml
+      register: k3s_installed
+
+    - name: Ignore manifest templates and URLs if the cluster is already installed
+      when: k3s_installed.stat.exists
+      ansible.builtin.set_fact:
+        k3s_server_manifests_templates: []
+        k3s_server_manifests_urls: []
+
+    - name: Install Kubernetes
+      ansible.builtin.include_role:
+        name: xanmanning.k3s
+        public: true
+      vars:
+        k3s_state: installed
+
+    - name: Wait for custom manifests to roll out
+      when:
+        - k3s_primary_control_node
+        - (k3s_server_manifests_templates | length > 0
+           or k3s_server_manifests_urls | length > 0)
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        kind: "{{ item.kind }}"
+        name: "{{ item.name }}"
+        namespace: "{{ item.namespace | default('') }}"
+        wait: true
+        wait_sleep: 10
+        wait_timeout: 360
+      loop:
+        - { name: cilium, kind: HelmChart, namespace: kube-system }
+        - { name: coredns, kind: HelmChart, namespace: kube-system }
+        - { name: policy, kind: CiliumL2AnnouncementPolicy }
+        - { name: pool, kind: CiliumLoadBalancerIPPool }
+        - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
+        - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition }
+        - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition }
+        - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
+
+    - name: Coredns
+      when: k3s_primary_control_node
+      ansible.builtin.include_tasks: tasks/coredns.yaml
+
+    - name: Cilium
+      when: k3s_primary_control_node
+      ansible.builtin.include_tasks: tasks/cilium.yaml
+
+    - name: Cruft
+      when: k3s_primary_control_node
+      ansible.builtin.include_tasks: tasks/cruft.yaml
+
+    - name: Stale Containers
+      ansible.builtin.include_tasks: tasks/stale_containers.yaml
+      vars:
+        stale_containers_state: enabled
diff --git a/ansible/onedr0p/playbooks/cluster-nuke.yaml b/ansible/onedr0p/playbooks/cluster-nuke.yaml
new file mode 100644
index 000000000..8110f2914
--- /dev/null
+++ b/ansible/onedr0p/playbooks/cluster-nuke.yaml
@@ -0,0 +1,61 @@
+---
+- name: Cluster Nuke
+  hosts: all
+  become: true
+  gather_facts: true
+  any_errors_fatal: true
+  pre_tasks:
+    - name: Pausing for 2 seconds...
+      ansible.builtin.pause:
+        seconds: 2
+  tasks:
+    - name: Stop Kubernetes # noqa: ignore-errors
+      ignore_errors: true
+      block:
+        - name: Stop Kubernetes
+          ansible.builtin.include_role:
+            name: xanmanning.k3s
+            public: true
+          vars:
+            k3s_state: stopped
+
+    # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md
+    - name: Networking
+      block:
+        - name: Networking | Delete Cilium links
+          ansible.builtin.command:
+            cmd: "ip link delete {{ item }}"
+            removes: "/sys/class/net/{{ item }}"
+          loop: ["cilium_host", "cilium_net", "cilium_vxlan"]
+        - name: Networking | Flush iptables
+          ansible.builtin.iptables:
+            table: "{{ item }}"
+            flush: true
+          loop: ["filter", "nat", "mangle", "raw"]
+        - name: Networking | Flush ip6tables
+          ansible.builtin.iptables:
+            table: "{{ item }}"
+            flush: true
+            ip_version: ipv6
+          loop: ["filter", "nat", "mangle", "raw"]
+        - name: Networking | Delete CNI directory
+          ansible.builtin.file:
+            path: /etc/cni/net.d
+            state: absent
+
+    - name: Uninstall Kubernetes
+      ansible.builtin.include_role:
+        name: xanmanning.k3s
+        public: true
+      vars:
+        k3s_state: uninstalled
+
+    - name: Stale Containers
+      ansible.builtin.include_tasks: tasks/stale_containers.yaml
+      vars:
+        stale_containers_state: disabled
+
+    - name: Reboot
+      ansible.builtin.reboot:
+        msg: Rebooting nodes
+        reboot_timeout: 3600
diff --git a/ansible/onedr0p/playbooks/cluster-prepare.yaml b/ansible/onedr0p/playbooks/cluster-prepare.yaml
new file mode 100644
index 000000000..384bd8012
--- /dev/null
+++ b/ansible/onedr0p/playbooks/cluster-prepare.yaml
@@ -0,0 +1,200 @@
+---
+- name: Prepare System
+  hosts: all
+  become: true
+  gather_facts: true
+  any_errors_fatal: true
+  pre_tasks:
+    - name: Pausing for 2 seconds...
+      ansible.builtin.pause:
+        seconds: 2
+  tasks:
+    - name: Locale
+      block:
+        - name: Locale | Set timezone
+          community.general.timezone:
+            name: "{{ timezone | default('Etc/UTC') }}"
+
+    - name: Packages
+      block:
+        - name: Packages | Add fish key
+          ansible.builtin.get_url:
+            url: https://download.opensuse.org/repositories/shells:fish:release:3/Debian_12/Release.key
+            dest: /etc/apt/trusted.gpg.d/fish.asc
+            owner: root
+            group: root
+            mode: "0644"
+        - name: Packages | Add fish repository
+          ansible.builtin.apt_repository:
+            repo: deb [signed-by=/etc/apt/trusted.gpg.d/fish.asc] http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_12/ /
+            filename: fish
+            update_cache: true
+        - name: Packages | Add non-free repository
+          ansible.builtin.apt_repository:
+            repo: deb http://deb.debian.org/debian/ stable main contrib non-free
+            filename: non-free
+            update_cache: true
+        - name: Packages | Install
+          ansible.builtin.apt:
+            name: i965-va-driver-shaders,apt-transport-https,ca-certificates,conntrack,curl,dirmngr,fish,gdisk,
+              gnupg,hdparm,htop,intel-gpu-tools,intel-media-va-driver-non-free,iperf3,iptables,iputils-ping,ipvsadm,
+              libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3,
+              python3-apt,python3-openshift,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common,
+              unzip,util-linux
+            install_recommends: false
+
+    - name: User Configuration
+      block:
+        - name: User Configuration | SSH keys
+          ansible.posix.authorized_key:
+            user: "{{ ansible_user }}"
+            key: "https://github.com/{{ github_username }}.keys"
+        - name: User Configuration | Silence login
+          ansible.builtin.file:
+            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.hushlogin"
+            state: touch
+            owner: "{{ ansible_user }}"
+            group: "{{ ansible_user }}"
ansible_user }}" + mode: "0644" + modification_time: preserve + access_time: preserve + - name: User Configuration | Add user to sudoers + when: ansible_user != 'root' + ansible.builtin.copy: + content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL" + dest: "/etc/sudoers.d/{{ ansible_user }}" + owner: root + group: root + mode: "0440" + - name: User Configuration | Fish shell (1) + ansible.builtin.user: + name: "{{ ansible_user }}" + shell: /usr/bin/fish + - name: User Configuration | Fish shell (2) + ansible.builtin.file: + path: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions" + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + recurse: true + - name: User Configuration | Fish shell (3) + ansible.builtin.copy: + dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/fish_greeting.fish" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + content: neofetch --config none + - name: User Configuration | Fish shell (3) + ansible.builtin.copy: + dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/k.fish" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + content: | + function k --wraps=kubectl --description 'kubectl shorthand' + kubectl $argv + end + + - name: Network Configuration + notify: Reboot + block: + - name: Network Configuration | Set hostname + ansible.builtin.hostname: + name: "{{ inventory_hostname }}" + - name: Network Configuration | Update hosts + ansible.builtin.copy: + dest: /etc/hosts + content: | + 127.0.0.1 localhost + 127.0.1.1 {{ inventory_hostname }} + + # The following lines are desirable for IPv6 capable hosts + ::1 localhost ip6-localhost ip6-loopback + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + mode: preserve + # https://github.com/cilium/cilium/issues/18706 + - name: Network Configuration | Cilium (1) + ansible.builtin.lineinfile: + dest: /etc/systemd/networkd.conf + regexp: ManageForeignRoutingPolicyRules + line: ManageForeignRoutingPolicyRules=no + - name: Network Configuration | Cilium (2) + ansible.builtin.lineinfile: + dest: /etc/systemd/networkd.conf + regexp: ManageForeignRoutes + line: ManageForeignRoutes=no + - name: Network Configuration | Set NIC + ansible.builtin.copy: + dest: /etc/network/interfaces.d/enp7s0 + content: | + auto enp7s0 + allow-hotplug enp7s0 + iface enp7s0 inet dhcp + post-up ifconfig enp7s0 mtu 9000 + owner: root + group: root + mode: "0644" + + - name: System Configuration + notify: Reboot + block: + - name: System Configuration | Neofetch + ansible.builtin.copy: + dest: /etc/profile.d/neofetch.sh + mode: "0755" + content: neofetch --config none + - name: System Configuration | Disable apparmor + ansible.builtin.systemd: + name: apparmor + state: stopped + masked: true + - name: System Configuration | Disable swap + ansible.posix.mount: + name: "{{ item }}" + fstype: swap + state: absent + loop: ["none", "swap"] + - name: System Configuration | Kernel modules (1) + community.general.modprobe: + name: "{{ item }}" + state: present + loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] + - name: System Configuration | Kernel modules (2) + ansible.builtin.copy: + dest: "/etc/modules-load.d/{{ item }}.conf" + mode: "0644" + content: "{{ item }}" + loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] + - name: System Configuration | Sysctl + ansible.posix.sysctl: + name: "{{ 
item.key }}" + value: "{{ item.value }}" + sysctl_file: /etc/sysctl.d/99-kubernetes.conf + reload: true + with_dict: "{{ sysctl_config }}" + vars: + sysctl_config: + fs.inotify.max_queued_events: 65536 + fs.inotify.max_user_watches: 524288 + fs.inotify.max_user_instances: 8192 + - name: System Configuration | Grub (1) + ansible.builtin.replace: + path: /etc/default/grub + regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ item.key | regex_escape }}=).)*)(?:[" ]{{ item.key | regex_escape }}=\S+)?(.*")$' + replace: '\1 {{ item.key }}={{ item.value }}\2' + with_dict: "{{ grub_config }}" + vars: + grub_config: + apparmor: "0" + mitigations: "off" + register: grub_status + - name: System Configuration | Grub (2) # noqa: no-changed-when no-handler + ansible.builtin.command: update-grub + when: grub_status.changed + + handlers: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting nodes + reboot_timeout: 3600 diff --git a/ansible/onedr0p/playbooks/cluster-update-rollout.yaml b/ansible/onedr0p/playbooks/cluster-update-rollout.yaml new file mode 100644 index 000000000..b30fa3b59 --- /dev/null +++ b/ansible/onedr0p/playbooks/cluster-update-rollout.yaml @@ -0,0 +1,75 @@ +--- +# https://github.com/kevincoakley/ansible-role-k8s-rolling-update +- name: Cluster update rollout + hosts: all + become: true + gather_facts: true + any_errors_fatal: true + serial: 1 + pre_tasks: + - name: Pausing for 2 seconds... + ansible.builtin.pause: + seconds: 2 + tasks: + - name: Details + ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json" + register: kubectl_get_node + delegate_to: "{{ groups['master'][0] }}" + failed_when: false + changed_when: false + + - name: Update + when: + # When status.conditions[x].type == Ready then check stats.conditions[x].status for True|False + - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status") + # If spec.unschedulable is defined then the node is cordoned + - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined + block: + - name: Cordon + ansible.builtin.command: "kubectl cordon {{ inventory_hostname }}" + delegate_to: "{{ groups['master'][0] }}" + changed_when: false + + - name: Wait to cordon + ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json" + register: wait_for_cordon + retries: 10 + delay: 10 + delegate_to: "{{ groups['master'][0] }}" + changed_when: false + until: (wait_for_cordon['stdout'] | from_json).spec.unschedulable + + - name: Drain + ansible.builtin.command: "kubectl drain --ignore-daemonsets --delete-emptydir-data --force {{ inventory_hostname }}" + delegate_to: "{{ groups['master'][0] }}" + changed_when: false + + - name: Update + ansible.builtin.apt: + upgrade: dist + update_cache: true + + - name: Check if reboot is required + ansible.builtin.stat: + path: /var/run/reboot-required + register: reboot_required + + - name: Reboot + when: reboot_required.stat.exists + ansible.builtin.reboot: + msg: Rebooting node + post_reboot_delay: 120 + reboot_timeout: 3600 + + - name: Uncordon + ansible.builtin.command: "kubectl uncordon {{ inventory_hostname }}" + delegate_to: "{{ groups['master'][0] }}" + changed_when: false + + - name: Wait to uncordon + ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json" + retries: 10 + delay: 10 + delegate_to: "{{ groups['master'][0] }}" + changed_when: false + until: not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined diff --git 
new file mode 100644
index 000000000..5136df2f6
--- /dev/null
+++ b/ansible/onedr0p/playbooks/files/stale-containers.service
@@ -0,0 +1,6 @@
+[Unit]
+Description=Stale containers
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/k3s crictl rmi --prune
diff --git a/ansible/onedr0p/playbooks/files/stale-containers.timer b/ansible/onedr0p/playbooks/files/stale-containers.timer
new file mode 100644
index 000000000..731885a14
--- /dev/null
+++ b/ansible/onedr0p/playbooks/files/stale-containers.timer
@@ -0,0 +1,11 @@
+[Unit]
+Description=Stale containers
+
+[Timer]
+OnCalendar=weekly
+AccuracySec=1h
+Persistent=true
+RandomizedDelaySec=6000
+
+[Install]
+WantedBy=timers.target
diff --git a/ansible/onedr0p/playbooks/tasks/cilium.yaml b/ansible/onedr0p/playbooks/tasks/cilium.yaml
new file mode 100644
index 000000000..ca242bb03
--- /dev/null
+++ b/ansible/onedr0p/playbooks/tasks/cilium.yaml
@@ -0,0 +1,56 @@
+---
+- name: Cilium
+  block:
+    - name: Cilium | Check if Cilium HelmChart exists
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+      register: cilium_helmchart
+
+    - name: Cilium | Wait for Cilium to roll out
+      when: cilium_helmchart.resources | count > 0
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: helm-install-cilium
+        kind: Job
+        namespace: kube-system
+        wait: true
+        wait_condition:
+          type: Complete
+          status: true
+        wait_timeout: 360
+
+    - name: Cilium | Patch the Cilium HelmChart to unmanage it
+      when: cilium_helmchart.resources | count > 0
+      kubernetes.core.k8s_json_patch:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        patch:
+          - op: add
+            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
+            value: "true"
+
+    - name: Cilium | Delete the Cilium HelmChart CR
+      when: cilium_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        state: absent
+
+    - name: Cilium | Force delete the Cilium HelmChart
+      when: cilium_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        state: patched
+        definition:
+          metadata:
+            finalizers: []
diff --git a/ansible/onedr0p/playbooks/tasks/coredns.yaml b/ansible/onedr0p/playbooks/tasks/coredns.yaml
new file mode 100644
index 000000000..d18383a75
--- /dev/null
+++ b/ansible/onedr0p/playbooks/tasks/coredns.yaml
@@ -0,0 +1,56 @@
+---
+- name: Coredns
+  block:
+    - name: Coredns | Check if Coredns HelmChart exists
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+      register: coredns_helmchart
+
+    - name: Coredns | Wait for Coredns to roll out
+      when: coredns_helmchart.resources | count > 0
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: helm-install-coredns
+        kind: Job
+        namespace: kube-system
+        wait: true
+        wait_condition:
+          type: Complete
+          status: true
+        wait_timeout: 360
+
+    - name: Coredns | Patch the Coredns HelmChart to unmanage it
+      when: coredns_helmchart.resources | count > 0
+      kubernetes.core.k8s_json_patch:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        patch:
+          - op: add
+            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
+            value: "true"
+
+    - name: Coredns | Delete the Coredns HelmChart CR
+      when: coredns_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        state: absent
+
+    - name: Coredns | Force delete the Coredns HelmChart
+      when: coredns_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        state: patched
+        definition:
+          metadata:
+            finalizers: []
diff --git a/ansible/onedr0p/playbooks/tasks/cruft.yaml b/ansible/onedr0p/playbooks/tasks/cruft.yaml
new file mode 100644
index 000000000..66ae984f2
--- /dev/null
+++ b/ansible/onedr0p/playbooks/tasks/cruft.yaml
@@ -0,0 +1,32 @@
+---
+# https://github.com/k3s-io/k3s/issues/1971
+- name: Cruft
+  block:
+    - name: Cruft | Get list of custom manifests
+      ansible.builtin.find:
+        paths: "{{ k3s_server_manifests_dir }}"
+        file_type: file
+        use_regex: true
+        patterns: ["^custom-.*"]
+      register: custom_manifest
+
+    - name: Cruft | Delete custom manifests
+      ansible.builtin.file:
+        path: "{{ item.path }}"
+        state: absent
+      loop: "{{ custom_manifest.files }}"
+
+    - name: Cruft | Get list of custom addons
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        kind: Addon
+      register: addons_list
+
+    - name: Cruft | Delete addons
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: "{{ item.metadata.name }}"
+        kind: Addon
+        namespace: kube-system
+        state: absent
+      loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"
diff --git a/ansible/onedr0p/playbooks/tasks/stale_containers.yaml b/ansible/onedr0p/playbooks/tasks/stale_containers.yaml
new file mode 100644
index 000000000..9857d6bce
--- /dev/null
+++ b/ansible/onedr0p/playbooks/tasks/stale_containers.yaml
@@ -0,0 +1,36 @@
+---
+# https://github.com/k3s-io/k3s/issues/1900
+- name: Enable Stale containers
+  when: stale_containers_state == "enabled"
+  block:
+    - name: Stale containers | Create systemd unit
+      ansible.builtin.copy:
+        src: files/stale-containers.service
+        dest: /etc/systemd/system/stale-containers.service
+        owner: root
+        group: root
+        mode: "0644"
+
+    - name: Stale containers | Create systemd timer
+      ansible.builtin.copy:
+        src: files/stale-containers.timer
+        dest: /etc/systemd/system/stale-containers.timer
+        owner: root
+        group: root
+        mode: "0644"
+
+    - name: Stale containers | Start the systemd timer
+      ansible.builtin.systemd:
+        name: stale-containers.timer
+        enabled: true
+        daemon_reload: true
+        masked: false
+        state: started
+
+- name: Disable Stale containers
+  when: stale_containers_state == "disabled"
+  block:
+    - name: Stale containers | Mask the systemd timer
+      ansible.builtin.systemd:
+        name: stale-containers.timer
+        masked: true
diff --git a/ansible/onedr0p/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/ansible/onedr0p/playbooks/templates/custom-cilium-helmchart.yaml.j2
new file mode 100644
index 000000000..2c87af7f8
--- /dev/null
+++ b/ansible/onedr0p/playbooks/templates/custom-cilium-helmchart.yaml.j2
@@ -0,0 +1,52 @@
+---
+# https://docs.k3s.io/helm
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: cilium
+  namespace: kube-system
+spec:
+  # renovate: datasource=helm
+  repo: https://helm.cilium.io/
+  chart: cilium
+  version: 1.14.1
+  targetNamespace: kube-system
+  bootstrap: true
+  valuesContent: |-
+    autoDirectNodeRoutes: true
+    bpf:
+      masquerade: true
+    bgp:
+      enabled: false
+    cluster:
+      name: kubernetes
+      id: 1
+    containerRuntime:
+      integration: containerd
+      socketPath: /var/run/k3s/containerd/containerd.sock
+    endpointRoutes:
+      enabled: true
+    hubble:
+      enabled: false
+    ipam:
+      mode: kubernetes
+    ipv4NativeRoutingCIDR: "{{ k3s_server['cluster-cidr'] }}"
+    k8sServiceHost: "{{ k3s_registration_address }}"
+    k8sServicePort: 6443
+    kubeProxyReplacement: strict
+    kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
+    l2announcements:
+      enabled: true
+      leaseDuration: 120s
+      leaseRenewDeadline: 60s
+      leaseRetryPeriod: 1s
+    loadBalancer:
+      algorithm: maglev
+      mode: dsr
+    localRedirectPolicy: true
+    operator:
+      rollOutPods: true
+    rollOutCiliumPods: true
+    securityContext:
+      privileged: true
+    tunnel: disabled
diff --git a/ansible/onedr0p/playbooks/templates/custom-cilium-l2.yaml.j2 b/ansible/onedr0p/playbooks/templates/custom-cilium-l2.yaml.j2
new file mode 100644
index 000000000..7b9624673
--- /dev/null
+++ b/ansible/onedr0p/playbooks/templates/custom-cilium-l2.yaml.j2
@@ -0,0 +1,21 @@
+---
+# https://docs.cilium.io/en/latest/network/l2-announcements
+apiVersion: cilium.io/v2alpha1
+kind: CiliumL2AnnouncementPolicy
+metadata:
+  name: policy
+spec:
+  loadBalancerIPs: true
+  interfaces:
+    - ^enp.*
+  nodeSelector:
+    matchLabels:
+      kubernetes.io/os: linux
+---
+apiVersion: cilium.io/v2alpha1
+kind: CiliumLoadBalancerIPPool
+metadata:
+  name: pool
+spec:
+  cidrs:
+    - cidr: "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ansible.utils.ipaddr('network/prefix') }}"
diff --git a/ansible/onedr0p/playbooks/templates/custom-coredns-helmchart.yaml.j2 b/ansible/onedr0p/playbooks/templates/custom-coredns-helmchart.yaml.j2
new file mode 100644
index 000000000..64593bc79
--- /dev/null
+++ b/ansible/onedr0p/playbooks/templates/custom-coredns-helmchart.yaml.j2
@@ -0,0 +1,77 @@
+---
+# https://docs.k3s.io/helm
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: coredns
+  namespace: kube-system
+spec:
+  # renovate: datasource=helm
+  repo: https://coredns.github.io/helm
+  chart: coredns
+  version: 1.25.0
+  targetNamespace: kube-system
+  bootstrap: true
+  valuesContent: |-
+    fullnameOverride: coredns
+    replicaCount: 2
+    k8sAppLabelOverride: kube-dns
+    service:
+      name: kube-dns
+      clusterIP: {{ k3s_server['service-cidr'] | ansible.utils.nthhost(10) }}
+    serviceAccount:
+      create: true
+    deployment:
+      annotations:
+        reloader.stakater.com/auto: "true"
+    servers:
+      - zones:
+          - zone: .
+            scheme: dns://
+            use_tcp: true
+        port: 53
+        plugins:
+          - name: log
+          - name: errors
+          - name: health
+            configBlock: |-
+              lameduck 5s
+          - name: ready
+          - name: kubernetes
+            parameters: cluster.local in-addr.arpa ip6.arpa
+            configBlock: |-
+              pods insecure
+              fallthrough in-addr.arpa ip6.arpa
+              ttl 30
+          - name: prometheus
+            parameters: 0.0.0.0:9153
+          - name: forward
+            parameters: . /etc/resolv.conf
+          - name: cache
+            parameters: 30
+          - name: loop
+          - name: reload
+          - name: loadbalance
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+                - key: node-role.kubernetes.io/control-plane
+                  operator: Exists
+    tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - key: node-role.kubernetes.io/control-plane
+        operator: Exists
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+    topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: kubernetes.io/hostname
+        whenUnsatisfiable: DoNotSchedule
+        labelSelector:
+          matchLabels:
+            app.kubernetes.io/instance: coredns
diff --git a/ansible/onedr0p/playbooks/templates/kube-vip-static-pod.yaml.j2 b/ansible/onedr0p/playbooks/templates/kube-vip-static-pod.yaml.j2
new file mode 100644
index 000000000..f54854bec
--- /dev/null
+++ b/ansible/onedr0p/playbooks/templates/kube-vip-static-pod.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-vip
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/instance: kube-vip
+    app.kubernetes.io/name: kube-vip
+spec:
+  containers:
+    - name: kube-vip
+      image: ghcr.io/kube-vip/kube-vip:v0.6.1
+      imagePullPolicy: IfNotPresent
+      args: ["manager"]
+      env:
+        - name: address
+          value: "{{ k3s_registration_address }}"
+        - name: vip_arp
+          value: "true"
+        - name: port
+          value: "6443"
+        - name: vip_cidr
+          value: "32"
+        - name: cp_enable
+          value: "true"
+        - name: cp_namespace
+          value: kube-system
+        - name: vip_ddns
+          value: "false"
+        - name: svc_enable
+          value: "false"
+        - name: vip_leaderelection
+          value: "true"
+        - name: vip_leaseduration
+          value: "15"
+        - name: vip_renewdeadline
+          value: "10"
+        - name: vip_retryperiod
+          value: "2"
+        - name: prometheus_server
+          value: :2112
+      securityContext:
+        capabilities:
+          add: ["NET_ADMIN", "NET_RAW"]
+      volumeMounts:
+        - mountPath: /etc/kubernetes/admin.conf
+          name: kubeconfig
+  hostAliases:
+    - hostnames:
+        - kubernetes
+      ip: 127.0.0.1
+  hostNetwork: true
+  volumes:
+    - name: kubeconfig
+      hostPath:
+        path: /etc/rancher/k3s/k3s.yaml
diff --git a/cluster/crds/traefik/crds.yaml b/cluster/crds/traefik/crds.yaml
index 55808bc1f..4bc764df0 100644
--- a/cluster/crds/traefik/crds.yaml
+++ b/cluster/crds/traefik/crds.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: GitRepository
 metadata:
   name: traefik-crd-source
diff --git a/cluster/infra/sources/authelia-charts.yaml b/cluster/infra/sources/authelia-charts.yaml
index e535d0a2b..8d0645d88 100644
--- a/cluster/infra/sources/authelia-charts.yaml
+++ b/cluster/infra/sources/authelia-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: authelia-charts
diff --git a/cluster/infra/sources/authentik-charts.yaml b/cluster/infra/sources/authentik-charts.yaml
index 9fd05b73c..1f7e242b4 100644
--- a/cluster/infra/sources/authentik-charts.yaml
+++ b/cluster/infra/sources/authentik-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: authentik-charts
diff --git a/cluster/infra/sources/descheduler-charts.yaml b/cluster/infra/sources/descheduler-charts.yaml
index 9187c64bb..c0b41aff8 100644
--- a/cluster/infra/sources/descheduler-charts.yaml
+++ b/cluster/infra/sources/descheduler-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: descheduler-charts
diff --git a/cluster/infra/sources/ealenn-charts.yaml b/cluster/infra/sources/ealenn-charts.yaml
index 6b0855791..4366063bc 100644
--- a/cluster/infra/sources/ealenn-charts.yaml
+++ b/cluster/infra/sources/ealenn-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: ealenn-charts
diff --git a/cluster/infra/sources/fission-charts.yaml b/cluster/infra/sources/fission-charts.yaml
index 3a55580cd..1f2178222 100644
--- a/cluster/infra/sources/fission-charts.yaml
+++ b/cluster/infra/sources/fission-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: fission-charts
diff --git a/cluster/infra/sources/go-skynet-charts.yaml b/cluster/infra/sources/go-skynet-charts.yaml
index c79227227..b309450a2 100644
--- a/cluster/infra/sources/go-skynet-charts.yaml
+++ b/cluster/infra/sources/go-skynet-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: go-skynet-charts
diff --git a/cluster/infra/sources/grafana-charts.yaml b/cluster/infra/sources/grafana-charts.yaml
index b7884ea00..fe5b152b4 100644
--- a/cluster/infra/sources/grafana-charts.yaml
+++ b/cluster/infra/sources/grafana-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: grafana-charts
diff --git a/cluster/infra/sources/hajimari-charts.yaml b/cluster/infra/sources/hajimari-charts.yaml
index 58b177474..48836790c 100644
--- a/cluster/infra/sources/hajimari-charts.yaml
+++ b/cluster/infra/sources/hajimari-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: hajimari-charts
diff --git a/cluster/infra/sources/influxdata-charts.yaml b/cluster/infra/sources/influxdata-charts.yaml
index 8022c21fb..2ca0cb043 100644
--- a/cluster/infra/sources/influxdata-charts.yaml
+++ b/cluster/infra/sources/influxdata-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: influxdata-charts
diff --git a/cluster/infra/sources/jetstack-charts.yaml b/cluster/infra/sources/jetstack-charts.yaml
index 04fef5f55..a755f79c7 100644
--- a/cluster/infra/sources/jetstack-charts.yaml
+++ b/cluster/infra/sources/jetstack-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: jetstack-charts
diff --git a/cluster/infra/sources/metrics-server-charts.yaml b/cluster/infra/sources/metrics-server-charts.yaml
index ed7ab0f1e..461c3a665 100644
--- a/cluster/infra/sources/metrics-server-charts.yaml
+++ b/cluster/infra/sources/metrics-server-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: metrics-server-charts
diff --git a/cluster/infra/sources/minio-charts.yaml b/cluster/infra/sources/minio-charts.yaml
index 5ea5515f2..09bc0a331 100644
--- a/cluster/infra/sources/minio-charts.yaml
+++ b/cluster/infra/sources/minio-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: minio-charts
diff --git a/cluster/infra/sources/nextcloud-charts.yaml b/cluster/infra/sources/nextcloud-charts.yaml
index fcef2b27f..407b953cd 100644
--- a/cluster/infra/sources/nextcloud-charts.yaml
+++ b/cluster/infra/sources/nextcloud-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: nextcloud-charts
diff --git a/cluster/infra/sources/nicholaswilde-charts.yaml b/cluster/infra/sources/nicholaswilde-charts.yaml
index baecdffe6..d700d6c5e 100644
--- a/cluster/infra/sources/nicholaswilde-charts.yaml
+++ b/cluster/infra/sources/nicholaswilde-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: nicholaswilde
diff --git a/cluster/infra/sources/prometheus-community-charts.yaml b/cluster/infra/sources/prometheus-community-charts.yaml
index 537f267ac..c5d62a142 100644
--- a/cluster/infra/sources/prometheus-community-charts.yaml
+++ b/cluster/infra/sources/prometheus-community-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: prometheus-community
diff --git a/cluster/infra/sources/spotahome-redis-operator-charts.yaml b/cluster/infra/sources/spotahome-redis-operator-charts.yaml
index 9f87b31db..591de8860 100644
--- a/cluster/infra/sources/spotahome-redis-operator-charts.yaml
+++ b/cluster/infra/sources/spotahome-redis-operator-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: spotahome-redis-operator-charts
diff --git a/cluster/infra/sources/timescale-charts.yaml b/cluster/infra/sources/timescale-charts.yaml
index 2b5c27353..38c02bbcc 100644
--- a/cluster/infra/sources/timescale-charts.yaml
+++ b/cluster/infra/sources/timescale-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: timescale-charts
diff --git a/cluster/infra/sources/traefik-charts.yaml b/cluster/infra/sources/traefik-charts.yaml
index 663702a41..24d848ed7 100644
--- a/cluster/infra/sources/traefik-charts.yaml
+++ b/cluster/infra/sources/traefik-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: traefik
diff --git a/cluster/infra/sources/vmware-tanzu-charts.yaml b/cluster/infra/sources/vmware-tanzu-charts.yaml
index a81d1c10b..6291c6d73 100644
--- a/cluster/infra/sources/vmware-tanzu-charts.yaml
+++ b/cluster/infra/sources/vmware-tanzu-charts.yaml
@@ -1,5 +1,5 @@
 ---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1beta2
 kind: HelmRepository
 metadata:
   name: vmware-tanzu