diff --git a/.envrc b/.envrc
new file mode 100644
index 00000000..547facdd
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,2 @@
+export KUBECONFIG=$(expand_path ./ansible/playbooks/output/k8s-config.yaml)
+export ANSIBLE_CONFIG=$(expand_path ./ansible/ansible.cfg)
diff --git a/ansible/example/group_vars/all.yml b/ansible/example/group_vars/all.yml
deleted file mode 100644
index d28526a9..00000000
--- a/ansible/example/group_vars/all.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# Role - common
-common_timezone: Europe/Amsterdam
-common_rpi_overclock: true
-common_rpi_poe_hat: true
diff --git a/ansible/example/group_vars/masters.yml b/ansible/example/group_vars/masters.yml
deleted file mode 100644
index db08431f..00000000
--- a/ansible/example/group_vars/masters.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Role - keepalived
-keepalived_vip: '192.168.100.50'
-
-# Role - cluster
-cluster_extra_sans:
-  - "{{ keepalived_vip }}"
-  - api.kubernetes.lan
-cluster_control_plane_endpoint: "api.kubernetes.lan:8443"
diff --git a/ansible/family_vars/debian.yml b/ansible/family_vars/debian.yml
index b591a742..88320044 100644
--- a/ansible/family_vars/debian.yml
+++ b/ansible/family_vars/debian.yml
@@ -13,3 +13,8 @@ common_packages:
   - net-tools
   - python3-openssl # Needed for ansible 'openssl_certificate_info' module
   - python-openshift
+
+kubernetes_packages:
+  - apt-transport-https
+  - ca-certificates
+  - curl
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 474386f1..0811eff5 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -16,14 +16,14 @@
 ####
 # Role - kubernetes
 ####
-# kubernetes_kubectl_version: 1.20.4-00
-# kubernetes_kubelet_version: 1.20.4-00
-# kubernetes_kubeadm_version: 1.20.4-00
+kubernetes_kubectl_version: 1.21.0-00
+kubernetes_kubelet_version: 1.21.0-00
+kubernetes_kubeadm_version: 1.21.0-00
 
 ####
 # Role - keepalived
 ####
-# keepalived_vip: ""
+# keepalived_vip: ''
 # keepalived_interface: "{{ ansible_default_ipv4['interface'] }}"
 
 ####
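
Note: kubectl, kubelet, and kubeadm are pinned to the same release and must be bumped in lockstep on every upgrade. A single source of truth would make the next bump a one-line change; a possible simplification (the `kubernetes_version` variable is hypothetical, not part of this change):

    # sketch for group_vars/all.yml — one pin drives all three packages
    kubernetes_version: '1.21.0-00'
    kubernetes_kubectl_version: '{{ kubernetes_version }}'
    kubernetes_kubelet_version: '{{ kubernetes_version }}'
    kubernetes_kubeadm_version: '{{ kubernetes_version }}'
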
diff --git a/ansible/group_vars/cluster.yml b/ansible/group_vars/cluster.yml
index a133b24e..b964d33f 100644
--- a/ansible/group_vars/cluster.yml
+++ b/ansible/group_vars/cluster.yml
@@ -5,7 +5,7 @@ cluster_extra_sans:
 
 cluster_control_plane_endpoint: '{{ keepalived_vip }}:8443'
 
-# Specify the Kubernetes version, current release is v1.20.4
-cluster_kubernetes_version: 'v1.20.4'
+# Specify the Kubernetes version, current release is v1.21.0
+cluster_kubernetes_version: 'v1.21.0'
 
 cluster_apiserver_enable_admission_plugins:
   - 'NodeRestriction'
diff --git a/ansible/group_vars/masters.yml b/ansible/group_vars/masters.yml
index 00205e93..93af079a 100644
--- a/ansible/group_vars/masters.yml
+++ b/ansible/group_vars/masters.yml
@@ -1,10 +1,4 @@
 ---
-####
-# Role - keepalived
-####
-# keepalived_vip: ''
-# keepalived_interface: "{{ ansible_default_ipv4['interface'] }}"
-
 ####
 # Role - cluster
 ####
diff --git a/ansible/inventory b/ansible/inventory
index 0db97991..0f4e752a 100644
--- a/ansible/inventory
+++ b/ansible/inventory
@@ -1,4 +1,3 @@
-
 [all]
 k8s-controlplane-01 hostname=k8s-controlplane-01 ansible_host=192.168.1.121 ansible_user=pi
 k8s-controlplane-02 hostname=k8s-controlplane-02 ansible_host=192.168.1.122 ansible_user=pi
diff --git a/ansible/playbooks/all.yml b/ansible/playbooks/all.yml
index d4cbea27..7cc46c1f 100644
--- a/ansible/playbooks/all.yml
+++ b/ansible/playbooks/all.yml
@@ -7,7 +7,7 @@
     fail:
       msg: "Only python3 is supported, you're running {{ ansible_facts['python_version'] }} locally"
     when: ansible_facts['python']['version']['major'] != 3
-- import_playbook: docker_cache.yml
+# - import_playbook: docker_cache.yml
 - import_playbook: common.yml
 - import_playbook: masters.yml
 - import_playbook: cluster.yml
diff --git a/ansible/roles/cluster/tasks/main.yml b/ansible/roles/cluster/tasks/main.yml
index 517e316a..78e7c2df 100644
--- a/ansible/roles/cluster/tasks/main.yml
+++ b/ansible/roles/cluster/tasks/main.yml
@@ -65,6 +65,10 @@
     kubeadm_join_token: '{{ generated_token.stdout }}'
   run_once: true
 
+- name: pull control plane images
+  command: kubeadm config images pull
+  when: inventory_hostname in groups["controlplane"]
+
 - name: 'initialize | perform cluster initialization on primary control node'
   include_tasks: 'initialize.yml'
   when:
diff --git a/ansible/roles/cni/tasks/cilium.yml b/ansible/roles/cni/tasks/cilium.yml
index 23d532a0..5d12a569 100644
--- a/ansible/roles/cni/tasks/cilium.yml
+++ b/ansible/roles/cni/tasks/cilium.yml
@@ -1,58 +1,5 @@
 ---
-
-# See https://github.com/cilium/cilium/issues/10645
-- name: set net.ipv4.conf.*.rp_filter to 0 for systemd 245 workaround
-  ansible.posix.sysctl:
-    name: '{{ item }}'
-    value: '0'
-    sysctl_file: /etc/sysctl.d/98-override_cilium_rp_filter.conf
-    reload: false
-  loop:
-    - net.ipv4.conf.all.rp_filter
-    - net.ipv4.conf.default.rp_filter
-  notify: restart systemd-sysctl
-
-- name: mount sys-fs-bpf
-  ansible.posix.mount:
-    path: /sys/fs/bpf
-    src: bpffs
-    opts: defaults
-    state: mounted
-    fstype: bpf
-
-- name: add Cilium Helm Repo
-  community.kubernetes.helm_repository:
-    name: cilium
-    repo_url: "https://helm.cilium.io/"
-
-- name: deploy Cilium
-  community.kubernetes.helm:
-    name: cilium
-    chart_ref: cilium/cilium
-    release_namespace: kube-system
-    chart_version: "{{ cni_cilium_helm_version }}"
-    values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}"
-
-- name: patch cilium-operator for helm chart bug
-  community.kubernetes.k8s:
-    state: present
-    definition:
-      apiVersion: apps/v1
-      kind: Deployment
-      metadata:
-        name: cilium-operator
-        namespace: kube-system
-      spec:
-        template:
-          spec:
-            containers:
-              - name: cilium-operator
-                image: cilium/operator-dev:{{ cni_cilium_image_version }}
-
-
-- name: Apply kube-router manifests
-  community.kubernetes.k8s:
-    state: present
-    template: 'generic-kuberouter-only-advertise-routes.yaml.j2'
-  when:
-    - cni_kube_router_enabled
+- name: apply cilium
+  command:
+    cmd: kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml
+    creates: /etc/cni/net.d/10-cilium.conflist
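
The Helm-based Cilium deploy above is replaced by a one-shot `kubectl apply` of the upstream v1.9 quick-install manifest, with `creates:` guarding idempotency. Since the apply returns before the CNI is actually up, a readiness check could follow it; a sketch (hypothetical task, not part of this change):

    - name: wait for cilium daemonset rollout   # sketch: verify the CNI came up
      command: kubectl -n kube-system rollout status daemonset/cilium --timeout=300s
      changed_when: false
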
cni_bgp_peer_address }}" - - "--peer-router-asns={{ cni_bgp_peer_asn }}" - - "--cluster-asn={{ bgp_cluster_asn }}" - - "--advertise-cluster-ip=true" - - "--advertise-external-ip=true" - - "--advertise-loadbalancer-ip=true" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kube-router - namespace: kube-system -rules: -- apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch -- apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch -- apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/ansible/roles/cni/templates/values.yaml.j2 b/ansible/roles/cni/templates/values.yaml.j2 deleted file mode 100644 index 07a4d681..00000000 --- a/ansible/roles/cni/templates/values.yaml.j2 +++ /dev/null @@ -1,148 +0,0 @@ -image: - repository: "{{ cni_cilium_image_repository }}" - tag: "{{ cni_cilium_image_version }}" - -# autoDirectNodeRoutes enables installation of PodCIDR routes between worker -# nodes if worker nodes share a common L2 network segment. -autoDirectNodeRoutes: true - -bpf: - # -- Enable native IP masquerade support in eBPF - masquerade: {{ cni_cilium_bpf_masquerade }} - - # -- Configure whether direct routing mode should route traffic via - # host stack (true) or directly and more efficiently out of BPF (false) if - # the kernel supports it. The latter has the implication that it will also - # bypass netfilter in the host namespace. - hostRouting: {{ cni_cilium_bpf_hostrouting }} - - # -- Configure the eBPF-based TPROXY to reduce reliance on iptables rules - # for implementing Layer 7 policy. - tproxy: {{ cni_cilium_bpf_tproxy }} - -# externalIPs is the configuration for ExternalIPs service handling -externalIPs: - # enabled enables ExternalIPs functionality - enabled: true - -# endpointRoutes enables use of per endpoint routes instead of routing vis -# the cilium_host interface -endpointRoutes: - enabled: {{ cni_cilium_endpoint_routes }} - -# hostServices is the configuration for ClusterIP service handling in host namespace -hostServices: - # enabled enables host reachable functionality - enabled: true - -# nodePort is the configuration for NodePort service handling -nodePort: - # enabled enables NodePort functionality - enabled: true - -{% if cni_cilium_hubble_enabled %} - -hubble: - enabled: true - # Enables the provided list of Hubble metrics. 
diff --git a/ansible/roles/cni/templates/values.yaml.j2 b/ansible/roles/cni/templates/values.yaml.j2
deleted file mode 100644
index 07a4d681..00000000
--- a/ansible/roles/cni/templates/values.yaml.j2
+++ /dev/null
@@ -1,148 +0,0 @@
-image:
-  repository: "{{ cni_cilium_image_repository }}"
-  tag: "{{ cni_cilium_image_version }}"
-
-# autoDirectNodeRoutes enables installation of PodCIDR routes between worker
-# nodes if worker nodes share a common L2 network segment.
-autoDirectNodeRoutes: true
-
-bpf:
-  # -- Enable native IP masquerade support in eBPF
-  masquerade: {{ cni_cilium_bpf_masquerade }}
-
-  # -- Configure whether direct routing mode should route traffic via
-  # host stack (true) or directly and more efficiently out of BPF (false) if
-  # the kernel supports it. The latter has the implication that it will also
-  # bypass netfilter in the host namespace.
-  hostRouting: {{ cni_cilium_bpf_hostrouting }}
-
-  # -- Configure the eBPF-based TPROXY to reduce reliance on iptables rules
-  # for implementing Layer 7 policy.
-  tproxy: {{ cni_cilium_bpf_tproxy }}
-
-# externalIPs is the configuration for ExternalIPs service handling
-externalIPs:
-  # enabled enables ExternalIPs functionality
-  enabled: true
-
-# endpointRoutes enables use of per endpoint routes instead of routing vis
-# the cilium_host interface
-endpointRoutes:
-  enabled: {{ cni_cilium_endpoint_routes }}
-
-# hostServices is the configuration for ClusterIP service handling in host namespace
-hostServices:
-  # enabled enables host reachable functionality
-  enabled: true
-
-# nodePort is the configuration for NodePort service handling
-nodePort:
-  # enabled enables NodePort functionality
-  enabled: true
-
-{% if cni_cilium_hubble_enabled %}
-
-hubble:
-  enabled: true
-  # Enables the provided list of Hubble metrics.
-  metrics:
-    enabled:
-    - dns:query;ignoreAAAA
-    - drop
-    - tcp
-    - flow
-    - port-distribution
-    - icmp
-    - http
-  listenAddress: ':4244'
-  relay:
-    enabled: true
-    image:
-      repository: docker.io/cilium/hubble-relay-dev
-      tag: v1.9.1
-  ui:
-    enabled: true
-    frontend:
-      image:
-        repository: docker.io/mcfio/hubble-ui-dev
-        tag: v0.7.3
-    backend:
-      image:
-        repository: docker.io/raspbernetes/hubble-ui-backend
-        tag: v0.7.5
-    proxy:
-      image:
-        repository: docker.io/envoyproxy/envoy
-        tag: "{{ cni_cilium_enovy_proxy_image_version }}"
-
-{% endif %}
-
-ipam:
-  operator:
-    # default: "10.0.0.0/8", however, using existing podCIDR
-    clusterPoolIPv4PodCIDR: "{{ cluster_pod_subnet }}"
-    clusterPoolIPv4MaskSize: 24
-
-# kubeProxyReplacement enables kube-proxy replacement in Cilium BPF datapath
-{% if cluster_kube_proxy_enabled == false %}
-kubeProxyReplacement: "strict"
-{% elif cluster_kube_proxy_enabled %}
-kubeProxyReplacement: "probe"
-{% endif %}
-
-# kubeProxyReplacement healthz server bind address
-# To enable set the value to '0.0.0.0:10256' for all ipv4
-# addresses and this '[::]:10256' for all ipv6 addresses.
-# By default it is disabled.
-kubeProxyReplacementHealthzBindAddr: '0.0.0.0:10256'
-
-# prometheus enables serving metrics on the configured port at /metrics
-# Enables metrics for cilium-agent.
-prometheus:
-  enabled: true
-  port: 9090
-  # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
-  serviceMonitor:
-    enabled: false
-
-operator:
-  image:
-    repository: docker.io/cilium/operator-dev
-    tag: "{{ cni_cilium_image_version }}"
-  # Enables metrics for cilium-operator.
-  prometheus:
-    enabled: true
-    serviceMonitor:
-      enabled: false
-
-# kubeConfigPath: ~/.kube/config
-k8sServiceHost: {{ k8s_service_host }} # 192.168.1.200
-k8sServicePort: {{ k8s_service_port }} # 8443
-
-# nativeRoutingCIDR allows to explicitly specify the CIDR for native routing. This
-# value corresponds to the configured cluster-cidr.
-nativeRoutingCIDR: {{ k8s_native_cluster_cidr }}
-
-
-containerRuntime:
-  integration: {{ cri_plugin }}
-
-# -- Configure the encapsulation configuration for communication between nodes.
-# Possible values:
-#   - disabled
-#   - vxlan (default)
-#   - geneve
-tunnel: "disabled"
-
-# loadBalancer is the general configuration for service load balancing
-loadBalancer:
-  # algorithm is the name of the load balancing algorithm for backend
-  # selection e.g. random or maglev
-  algorithm: maglev
-  # mode is the operation mode of load balancing for remote backends
-  # e.g. snat, dsr, hybrid
-  mode: snat
-
-# disableEnvoyVersionCheck removes the check for Envoy, which can be useful on
-# AArch64 as the images do not currently ship a version of Envoy.
-disableEnvoyVersionCheck: true
diff --git a/ansible/roles/common/tasks/common.yml b/ansible/roles/common/tasks/common.yml
index a8c57302..0c6e79ea 100644
--- a/ansible/roles/common/tasks/common.yml
+++ b/ansible/roles/common/tasks/common.yml
@@ -34,7 +34,7 @@
     mode: 0644
 
 - name: set timezone
-  command: "/usr/bin/timedatectl set-timezone {{ common_timezone }}"
+  command: '/usr/bin/timedatectl set-timezone {{ common_timezone }}'
   when: common_timezone not in timedatectl_result.stdout_lines[0]
 
 - name: enable ntp
@@ -43,36 +43,6 @@
 
 - name: 'set hostname'
   hostname:
-    name: "{{ inventory_hostname }}"
+    name: '{{ inventory_hostname }}'
 
-- name: 'ensure required modules load at system startup'
-  ansible.builtin.copy:
-    dest: '/etc/modules-load.d/{{ cri_plugin }}.conf'
-    content: |
-      overlay
-      br_netfilter
-
-# Ensure the br_netfilter module is loaded and iptables can see bridged traffic
-# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic
-- name: 'ensure required modules are loaded'
-  community.general.modprobe:
-    name: '{{ item }}'
-    state: 'present'
-  loop:
-    - 'overlay'
-    - 'br_netfilter'
-  tags:
-    - notest
-- name: 'ensure sysctl options are configured for container runtime'
-  ansible.posix.sysctl:
-    name: '{{ item }}'
-    value: '1'
-    state: 'present'
-    sysctl_file: '/etc/sysctl.d/99-kubernetes-cri.conf'
-    reload: false
-  loop:
-    - net.bridge.bridge-nf-call-iptables
-    - net.bridge.bridge-nf-call-ip6tables
-    - net.ipv4.ip_forward
-  notify: restart systemd-sysctl
diff --git a/ansible/roles/kubernetes/containerd/handlers/main.yml b/ansible/roles/kubernetes/containerd/handlers/main.yml
new file mode 100644
index 00000000..2a0bd3c0
--- /dev/null
+++ b/ansible/roles/kubernetes/containerd/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart systemd-sysctl
+  systemd:
+    name: systemd-sysctl
+    state: restarted
diff --git a/ansible/roles/kubernetes/containerd/tasks/main.yml b/ansible/roles/kubernetes/containerd/tasks/main.yml
new file mode 100644
index 00000000..5aa5ce49
--- /dev/null
+++ b/ansible/roles/kubernetes/containerd/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+# Ensure the br_netfilter module is loaded and iptables can see bridged traffic
+# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic
+- name: 'ensure required modules are loaded'
+  community.general.modprobe:
+    name: '{{ item }}'
+    state: 'present'
+  loop:
+    - 'overlay'
+    - 'br_netfilter'
+  tags:
+    - notest
+
+- name: 'ensure required modules load at system startup'
+  ansible.builtin.copy:
+    dest: '/etc/modules-load.d/{{ cri_plugin }}.conf'
+    content: |
+      overlay
+      br_netfilter
+
+- name: 'ensure sysctl options are configured for container runtime'
+  ansible.posix.sysctl:
+    name: '{{ item }}'
+    value: '1'
+    state: 'present'
+    sysctl_file: '/etc/sysctl.d/99-kubernetes-cri.conf'
+    reload: false
+  loop:
+    - net.bridge.bridge-nf-call-iptables
+    - net.ipv4.ip_forward
+    - net.bridge.bridge-nf-call-ip6tables
+  notify: restart systemd-sysctl
diff --git a/ansible/roles/kubernetes/meta/main.yml b/ansible/roles/kubernetes/meta/main.yml
new file mode 100644
index 00000000..9955ccdd
--- /dev/null
+++ b/ansible/roles/kubernetes/meta/main.yml
@@ -0,0 +1,6 @@
+# main.yml - use dependency management to select the proper container runtime
+---
+dependencies:
+  - role: kubernetes/containerd
+    when:
+      - cri_plugin == 'containerd'
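
The new meta dependency selects the runtime role from `cri_plugin`, so alternative runtimes can be slotted in without touching the task files. Extending the pattern could look like this (the `kubernetes/docker` role below is hypothetical, not part of this change):

    dependencies:
      - role: kubernetes/containerd
        when:
          - cri_plugin == 'containerd'
      # hypothetical second runtime, selected the same way
      - role: kubernetes/docker
        when:
          - cri_plugin == 'docker'
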
diff --git a/ansible/roles/kubernetes/tasks/debian.yml b/ansible/roles/kubernetes/tasks/debian.yml
index 05bb2eeb..8fff18dc 100644
--- a/ansible/roles/kubernetes/tasks/debian.yml
+++ b/ansible/roles/kubernetes/tasks/debian.yml
@@ -1,12 +1,79 @@
 ---
+# Ensure the br_netfilter module is loaded and iptables can see bridged traffic
+# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic
+- name: 'ensure required modules are loaded'
+  community.general.modprobe:
+    name: '{{ item }}'
+    state: 'present'
+  loop:
+    - 'br_netfilter'
+  tags:
+    - notest
+
+- name: 'ensure required modules load at system startup'
+  ansible.builtin.copy:
+    dest: '/etc/modules-load.d/k8s.conf'
+    content: |
+      br_netfilter
+
+# Set /proc/sys/net/bridge/bridge-nf-call-iptables to 1 by running
+# sysctl net.bridge.bridge-nf-call-iptables=1 to pass bridged IPv4 traffic to iptables' chains.
+# This is a requirement for some CNI plugins to work; these settings persist across reboots.
+# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic
+- name: 'ensure sysctl options are configured for container runtime'
+  ansible.posix.sysctl:
+    name: '{{ item }}'
+    value: '1'
+    state: 'present'
+    sysctl_file: '/etc/sysctl.d/k8s.conf'
+    reload: false
+  loop:
+    - net.bridge.bridge-nf-call-iptables
+    - net.bridge.bridge-nf-call-ip6tables
+  notify: restart systemd-sysctl
+
+# Required for dual-stack
+# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/dual-stack-support/#before-you-begin
+- name: Update bridged IPv6 traffic forwarding
+  sysctl:
+    name: net.ipv6.conf.all.forwarding
+    value: '1'
+    state: present
+    sysctl_file: /etc/sysctl.d/k8s.conf
+
+- name: apt-get upgrade
+  apt:
+    upgrade: full
+    update_cache: true
+    cache_valid_time: 3600
+    force_apt_get: true
+    autoclean: true
+    autoremove: true
+  register: apt_upgrade
+  retries: 5
+  until: apt_upgrade is success
+
+- name: install kubernetes packages
+  apt:
+    name: '{{ kubernetes_packages }}'
+    install_recommends: false
+    update_cache: true
+    cache_valid_time: 3600
+    force_apt_get: true
+    autoclean: true
+    autoremove: true
+  register: apt_install_common
+  retries: 5
+  until: apt_install_common is success
+  ignore_errors: '{{ ansible_check_mode }}'
+
 # Instructions: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl
-- name: add apt signing key for kubernetes (1/4)
+- name: add apt signing key for kubernetes
   apt_key:
     url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
     state: present
 
-# Removed the following repo due to stability issues: https://apt.kubernetes.io/
-- name: adding apt repository for kubernetes (2/4)
+- name: adding apt repository for kubernetes
   apt_repository:
     repo: deb http://packages.cloud.google.com/apt/ kubernetes-xenial main
     state: present
@@ -39,30 +106,8 @@
     - 'kubelet'
     - 'kubeadm'
 
-- name: add apt signing key for helm (1/3)
-  apt_key:
-    url: https://baltocdn.com/helm/signing.asc
-    state: present
-
-- name: adding apt repository for helm (2/3)
-  apt_repository:
-    repo: deb https://baltocdn.com/helm/stable/debian/ all main
-    state: present
-  register: helm_repository
-  retries: 10
-  until: helm_repository is success
-
-- name: install helm package (3/3)
-  apt:
-    name: helm
-    state: present
-    force: true
-    update_cache: true
-  register: apt_install_helm
-  retries: 5
-  until: apt_install_helm is success
-
-# Adding required Kubernetes cgroups
+# TODO: Move to the correct location; perhaps common for RPi?
+# Adding required Kubernetes cgroups for Raspberry Pi
 - name: Enable container features
   ansible.builtin.replace:
     path: '{{ common_rpi_cmd_file }}'
@@ -70,34 +115,9 @@
     replace: '\1 {{ item }}'
   with_items:
     - 'cgroup_enable=cpuset'
-    - 'cgroup_memory=1'
     - 'cgroup_enable=memory'
+    - 'cgroup_memory=1'
+    - 'swapaccount=1'
+  notify: reboot hosts
   when: ansible_architecture | regex_search('arm|aarch')
-
-# Set /proc/sys/net/bridge/bridge-nf-call-iptables to 1 by running
-# sysctl net.bridge.bridge-nf-call-iptables=1 to pass bridged IPv4 traffic to iptables’ chains.
-# This is a requirement for some CNI plugins to work, these persist across reboots.
-# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic
-- name: Update bridged IPv4 traffic to iptables' chains
-  sysctl:
-    name: net.bridge.bridge-nf-call-iptables
-    value: '1'
-    state: present
-    sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf
-
-- name: Update bridged IPv4 traffic to ip6tables' chains
-  sysctl:
-    name: net.bridge.bridge-nf-call-ip6tables
-    value: '1'
-    state: present
-    sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf
-
-# Required for containerd CRI prerequisites
-# https://kubernetes.io/docs/setup/production-environment/container-runtimes/#prerequisites-1
-- name: Update bridged IPv4 traffic forwarding
-  sysctl:
-    name: net.ipv4.ip_forward
-    value: '1'
-    state: present
-    sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf
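
The cgroup task above now notifies `reboot hosts`, since kernel command-line changes only take effect after a restart. Ansible errors out if a notified handler does not exist, and the handler itself is not part of this diff; if the role does not already define one, a minimal sketch would be:

    # handlers/main.yml — sketch, assuming no 'reboot hosts' handler exists yet
    - name: reboot hosts
      ansible.builtin.reboot:
        reboot_timeout: 600
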
diff --git a/setup/cloud-config.yml b/setup/cloud-config.yml
index 8e83266d..6bf15070 100755
--- a/setup/cloud-config.yml
+++ b/setup/cloud-config.yml
@@ -42,6 +42,7 @@ write_files:
       version: 2
       ethernets:
         eth0:
+          dhcp4: no
          addresses:
            - 192.168.1.121/24
          gateway4: 192.168.1.1
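
`dhcp4: no` makes the static assignment explicit, so the node no longer picks up a second, DHCP-assigned address alongside the pinned one. As an aside, newer netplan releases deprecate `gateway4` in favor of an explicit default route; an equivalent (hypothetical) stanza would be:

    eth0:
      dhcp4: no
      addresses:
        - 192.168.1.121/24
      routes:
        - to: default
          via: 192.168.1.1
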