From 47f1dca40567227ff32710f6d2527b8d849a95fe Mon Sep 17 00:00:00 2001
From: Sumit Solanki
Date: Thu, 17 Oct 2024 20:52:51 +0530
Subject: [PATCH] Feat: ABI SNO & Compact Support For z/VM (#335)

Support has been added for the SNO and Compact types of OCP cluster
installation on z/VM using the Agent Based Installer (ABI).

---------

Signed-off-by: Sumit Solanki
Co-authored-by: Sumit Solanki
---
 docs/run-the-playbooks-for-abi.md             | 27 ++++++-----
 docs/set-variables-group-vars.md              | 22 +++++++++
 inventories/default/group_vars/zvm.yaml       | 37 +++++++++++++++
 playbooks/0_setup.yaml                        |  1 +
 playbooks/create_abi_cluster.yaml             | 22 +++++++--
 roles/boot_zvm_nodes/tasks/main.yaml          | 33 +++++++++++++
 roles/boot_zvm_nodes/templates/boot_nodes.py  | 47 +++++++++++++++++++
 roles/dns/tasks/main.yaml                     |  1 +
 .../templates/agent-config.yaml.j2            |  2 +
 roles/set_inventory/templates/hosts.j2        | 15 ++++--
 roles/setup_params/tasks/main.yaml            | 27 +++++++++++
 .../templates/param-file.param.j2             |  1 +
 12 files changed, 217 insertions(+), 18 deletions(-)
 create mode 100644 inventories/default/group_vars/zvm.yaml
 create mode 100644 roles/boot_zvm_nodes/tasks/main.yaml
 create mode 100644 roles/boot_zvm_nodes/templates/boot_nodes.py
 create mode 100644 roles/setup_params/tasks/main.yaml
 create mode 100644 roles/setup_params/templates/param-file.param.j2

diff --git a/docs/run-the-playbooks-for-abi.md b/docs/run-the-playbooks-for-abi.md
index 3634a9c4..c3a65843 100644
--- a/docs/run-the-playbooks-for-abi.md
+++ b/docs/run-the-playbooks-for-abi.md
@@ -1,21 +1,26 @@
-# Run the Playbooks
-## Prerequisites
-* KVM host with root user access or user with sudo privileges.
+# Run the Agent Based Installer (ABI) Playbooks
+## Prerequisites
+* KVM host with root user access or a user with sudo privileges (applicable only in case of KVM).
+* z/VM bastion with root user access or a user with sudo privileges (applicable only in case of z/VM).
+* z/VM host with the desired network cards enabled and storage details at hand (applicable only in case of z/VM).
 ## Note:
-* This playbook only support for single node cluster (SNO) on KVM using ABI.
-* As of now we are supporting only macvtap for Agent based installation (ABI) on KVM
+* This playbook supports the SNO, Compact, and HA types of OCP cluster installation on KVM using ABI.
+* This playbook supports both the macvtap and NAT network modes for Agent Based Installation (ABI) on KVM.
+* This playbook supports only the SNO and Compact types of OCP cluster installation on z/VM using ABI.
+* As of now, this playbook supports OCP cluster installation on z/VM only over a `vSwitch` network, with both the `fcp` and `dasd` storage types.
 
 ### Steps:
 
 ## Step-1: Initial Setup for ABI
 * Navigate to the [root folder of the cloned Git repository](https://github.com/IBM/Ansible-OpenShift-Provisioning) in your terminal (`ls` should show [ansible.cfg](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/ansible.cfg)).
-* Update variables in Section (1 - 9) and Section 12 - OpenShift Settings
-* Update variables in Section - 19 ( Agent Based Installer ) in [all.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template) before running the playbooks.
-* Section 7 - ( Bootstrap Node ) need to be comment or remove while using it for ABI.
-* In case of SNO Section 8 - ( Control Nodes ) Virtual CPU should be 8 ( vcpu: 8 )
-* In case of SNO Section 9 ( Compute Nodes ) need to be comment or remove
-* First playbook to be run is 0_setup.yaml which will create inventory file for ABI and will add ssh key to the kvm host.
+* Update variables in Sections 1 - 9 and in the OpenShift Settings, including `machine_network`.
+* Update variables in Section 14 ( `Agent Based Installer` ) in [all.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template) before running the playbooks.
+* Section 7 ( `Bootstrap Node` ) needs to be commented out or removed when using ABI.
+* In case of SNO, Section 8 ( `Control Nodes` ) should use 8 virtual CPUs ( `vcpu: 8` ).
+* In case of SNO, Section 9 ( `Compute Nodes` ) needs to be commented out or removed.
+* The first playbook to be run is `0_setup.yaml`, which will create the inventory file for ABI and add the ssh key to the KVM host.
+* In case of z/VM, update the variables in [zvm.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/zvm.yaml).
 * Run this shell command:
 ```
diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
index c27715ab..64c3c635 100644
--- a/docs/set-variables-group-vars.md
+++ b/docs/set-variables-group-vars.md
@@ -352,3 +352,25 @@
 **hcp.data_plane.zvm.interface.ip** | IP addresses for to be used for zVM nodes | 192.168.10.1
 **hcp.data_plane.zvm.nodes.dasd.disk_id** | Disk id for dasd disk to be used for zVM node | 4404
 **hcp.data_plane.zvm.nodes.lun** | Disk details of fcp disk to be used for zVM node | 4404
+
+## ZVM ( Optional )
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**zvm.network_mode** | Network mode for z/VM nodes. Supported modes: vswitch, osa, RoCE | vswitch
+**zvm.disk_type** | Disk type for z/VM nodes. Supported disk types: fcp, dasd | dasd
+**zvm.subnetmask** | Subnet mask for compute nodes | 255.255.255.0
+**zvm.gateway** | Gateway for compute nodes | 192.168.10.1
+**zvm.vcpus** | vCPUs for compute nodes | 4
+**zvm.memory** | RAM for compute nodes, in MB | 16384
+**zvm.nodes** | Set of parameters for the z/VM nodes. Give the details of each z/VM node here |
+**zvm.nodes.name** | Name of the z/VM guest | m1317002
+**zvm.nodes.host** | Hostname of the z/VM host, used to log in to the 3270 console | boem1317
+**zvm.nodes.user** | Username used to log in to the z/VM guest | m1317002
+**zvm.nodes.password** | Password used to log in to the z/VM guest | password
+**zvm.nodes.interface.ifname** | Network interface name for the z/VM guest | encbdf0
+**zvm.nodes.interface.nettype** | Network type used by the z/VM guest for network connectivity | qeth
+**zvm.nodes.interface.subchannels** | Subchannels for the z/VM guest's interface | 0.0.bdf0,0.0.bdf1,0.0.bdf2
+**zvm.nodes.interface.options** | Configuration options for the interface | layer2=1
+**zvm.nodes.interface.ip** | IP address to be used for the z/VM node | 192.168.10.1
+**zvm.nodes.dasd.disk_id** | Disk id of the dasd disk to be used for the z/VM node | 4404
+**zvm.nodes.lun** | Details of the fcp disk to be used for the z/VM node | 840a
diff --git a/inventories/default/group_vars/zvm.yaml b/inventories/default/group_vars/zvm.yaml
new file mode 100644
index 00000000..faf2ea09
--- /dev/null
+++ b/inventories/default/group_vars/zvm.yaml
@@ -0,0 +1,37 @@
+# For a comprehensive description of each variable, please see documentation here:
+# https://ibm.github.io/Ansible-OpenShift-Provisioning/set-variables-group-vars/
+# ZVM Section
+
+zvm:
+  network_mode:  # Supported modes: vswitch
+  disk_type:  # Supported types: fcp, dasd
+  subnetmask:
+  gateway:
+  nameserver:
+  vcpus:
+  memory:
+
+  nodes:
+    - name:
+      host:
+      user:
+      password:
+      interface:
+        ifname: encbdf0
+        nettype: qeth
+        subchannels: 0.0.bdf0,0.0.bdf1,0.0.bdf2
+        options: layer2=1
+        ip:
+
+      # Required if disk_type is dasd
+      dasd:
+        disk_id:
+
+      # Required if disk_type is fcp
+      lun:
+        - id:
+          paths:
+            - wwpn:
+              fcp:
+            - wwpn:
+              fcp:
diff --git a/playbooks/0_setup.yaml b/playbooks/0_setup.yaml
index 54bf91dd..8c56cd3b 100644
--- a/playbooks/0_setup.yaml
+++ b/playbooks/0_setup.yaml
@@ -7,6 +7,7 @@
   gather_facts: true
   vars_files:
     - "{{ inventory_dir }}/group_vars/disconnected.yaml"
+    - "{{ inventory_dir }}/group_vars/zvm.yaml"
   roles:
     - set_inventory
     - install_galaxy
diff --git a/playbooks/create_abi_cluster.yaml b/playbooks/create_abi_cluster.yaml
index a1bdc798..eb91c861 100644
--- a/playbooks/create_abi_cluster.yaml
+++ b/playbooks/create_abi_cluster.yaml
@@ -6,14 +6,30 @@
   become: false
   vars_files:
     - "{{ inventory_dir }}/group_vars/all.yaml"
-    - "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+    - "{{ inventory_dir }}/group_vars/zvm.yaml"
   roles:
     - common # Common Variable the will be used by all the inwalked roles.
     - download_ocp_installer # Download Openshift Installer.
     - prepare_configs # Prepare AgentConfig & InstallConfig.
     - create_agent # Create Agents || Build initrd.img, rootfs.img & kernelfs.img.
 
-# Boot ABI Agents.
+- name: Start z/VM Nodes
+  hosts: bastion
+  become: false
+  vars_files:
+    - "{{ inventory_dir }}/group_vars/zvm.yaml"
+  tasks:
+    - block:
+        - name: Setup Param File
+          include_role:
+            name: setup_params
+
+        - name: Boot z/VM Guests
+          include_tasks: ../roles/boot_zvm_nodes/tasks/main.yaml
+          loop: "{{ range( zvm.nodes | length ) | list }}"
+      when: installation_type | lower == 'zvm'
+
+# Boot ABI KVM Agents.
 - name: Boot ABI Agents
   hosts: kvm_host[0]
   become: false
@@ -21,4 +37,4 @@
     - "{{ inventory_dir }}/group_vars/all.yaml"
   roles:
     - common
-    - boot_abi_agents
+    - { role: boot_abi_agents, when: installation_type | lower == 'kvm' }
diff --git a/roles/boot_zvm_nodes/tasks/main.yaml b/roles/boot_zvm_nodes/tasks/main.yaml
new file mode 100644
index 00000000..80c490b4
--- /dev/null
+++ b/roles/boot_zvm_nodes/tasks/main.yaml
@@ -0,0 +1,33 @@
+---
+- name: Creating agents
+  block:
+    - name: Getting script for booting
+      template:
+        src: ../templates/boot_nodes.py
+        dest: /root/ansible_workdir/boot_nodes.py
+
+    - name: Debug
+      debug:
+        msg: "Booting {{ env.cluster.networking.metadata_name }}-{{ item }}"
+
+    - name: Booting zvm node
+      shell: |
+        python /root/ansible_workdir/boot_nodes.py \
+        --zvmname "{{ zvm.nodes[item].name }}" \
+        --zvmhost "{{ zvm.nodes[item].host }}" \
+        --zvmuser "{{ zvm.nodes[item].user }}" \
+        --zvmpass "{{ zvm.nodes[item].password }}" \
+        --cpu "{{ zvm.vcpus }}" \
+        --memory "{{ zvm.memory }}" \
+        --kernel 'file:///var/lib/libvirt/images/pxeboot/kernel.img' \
+        --initrd 'file:///var/lib/libvirt/images/pxeboot/initrd.img' \
+        --cmdline "$(cat /root/ansible_workdir/{{ env.cluster.networking.metadata_name }}-{{ item }}.param)" \
+        --network "{{ zvm.network_mode }}"
+
+    - name: Attaching dasd disk
+      shell: vmcp attach {{ zvm.nodes[item].dasd.disk_id }} to {{ zvm.nodes[item].name }}
+      when: zvm.disk_type | lower == 'dasd'
+
+    - name: Attaching fcp disks
+      shell: vmcp attach {{ zvm.nodes[item].lun[0].paths[0].fcp.split('.')[-1] }} to {{ zvm.nodes[item].name }}
+      when: zvm.disk_type | lower == 'fcp'
diff --git a/roles/boot_zvm_nodes/templates/boot_nodes.py b/roles/boot_zvm_nodes/templates/boot_nodes.py
new file mode 100644
index 00000000..738118ef
--- /dev/null
+++ b/roles/boot_zvm_nodes/templates/boot_nodes.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+from tessia.baselib.hypervisors.zvm.zvm import HypervisorZvm
+import argparse
+
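+# This file is an Ansible template: the Jinja expressions below are rendered
+# per node before the script is copied to the bastion and run there. It
+# network-boots (IPLs) a z/VM guest with the supplied kernel, initrd, and
+# cmdline through the tessia baselib z/VM hypervisor interface.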
+parser = argparse.ArgumentParser(description="Get the environment.")
+
+parser.add_argument("--zvmname", type=str, help="z/VM Hypervisor name", required=True)
+parser.add_argument("--zvmhost", type=str, help="z/VM Hostname or IP", required=True)
+parser.add_argument("--zvmuser", type=str, help="z/VM user", required=True)
+parser.add_argument("--zvmpass", type=str, help="z/VM user password", required=True)
+parser.add_argument("--cpu", type=int, help="number of guest CPUs", required=True)
+parser.add_argument("--memory", type=int, help="guest memory in MB", required=True)
+parser.add_argument("--kernel", type=str, help="kernel URI", required=True)
+parser.add_argument("--cmdline", type=str, help="kernel cmdline", required=True)
+parser.add_argument("--initrd", type=str, help="initrd URI", required=True)
+parser.add_argument("--network", type=str, help="Network mode for the z/VM node. Supported modes: OSA, vswitch, RoCE, Hipersockets", required=True)
+
+args = parser.parse_args()
+
+parameters = {
+    'transfer-buffer-size': 8000
+}
+
+interfaces = []
+if args.network.lower() == 'osa' or args.network.lower() == 'hipersockets':
+    interfaces = [{"type": "osa", "id": "{{ zvm.nodes[item].interface.subchannels.split(',') | map('regex_replace', '0.0.', '') | join(',') }}"}]
+elif args.network.lower() == 'roce':
+    interfaces = [{"type": "pci", "id": "{{ zvm.nodes[item].interface.ifname }}"}]
+
+guest_parameters = {
+    "boot_method": "network",
+    "storage_volumes": [],
+    "ifaces": interfaces,
+    "netboot": {
+        "cmdline": args.cmdline,
+        "kernel_uri": args.kernel,
+        "initrd_uri": args.initrd,
+    }
+}
+
+zvm = HypervisorZvm(args.zvmname, args.zvmhost, args.zvmuser, args.zvmpass, parameters)
+zvm.login()
+print("Logged in")
+zvm.start(args.zvmuser, args.cpu, args.memory, guest_parameters)
+print("VM started")
+zvm.logoff()
+print("Logged out")
diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml
index 7f792552..ff0dd1a7 100644
--- a/roles/dns/tasks/main.yaml
+++ b/roles/dns/tasks/main.yaml
@@ -159,3 +159,4 @@
   ansible.builtin.service:
     name: NetworkManager
     state: restarted
+  when: installation_type | lower != 'zvm'
diff --git a/roles/prepare_configs/templates/agent-config.yaml.j2 b/roles/prepare_configs/templates/agent-config.yaml.j2
index 2426b1b4..574561f0 100644
--- a/roles/prepare_configs/templates/agent-config.yaml.j2
+++ b/roles/prepare_configs/templates/agent-config.yaml.j2
@@ -4,6 +4,7 @@ metadata:
   name: {{ env.cluster.networking.metadata_name }}
 rendezvousIP: {{ env.cluster.nodes.control.ip[0] }}
+{% if installation_type | lower != 'zvm' %}
 hosts:
 {% for item in range( env.cluster.nodes.control.ip | length ) %}
   - hostname: "{{ env.cluster.nodes.control.hostname[item] }}"
@@ -64,4 +65,5 @@ hosts:
         next-hop-interface: eth0
         table-id: 254
 {% endfor %}
+{% endif %}
 {% endif %}
\ No newline at end of file
diff --git a/roles/set_inventory/templates/hosts.j2 b/roles/set_inventory/templates/hosts.j2
index ac5a58c9..b1f607b9 100644
--- a/roles/set_inventory/templates/hosts.j2
+++ b/roles/set_inventory/templates/hosts.j2
@@ -4,10 +4,17 @@
 [file_server]
 {{ env.file_server.ip }} ansible_user={{ env.file_server.user }} ansible_become_password={{ env.file_server.pass }}
 
-[kvm_host]
-{{ env.z.lpar1.hostname }} ansible_host={{ env.z.lpar1.ip }} ansible_user={{ env.z.lpar1.user }} ansible_become_password={{ env.z.lpar1.pass }}
-{{ (env.z.lpar2.hostname + ' ansible_host=' + env.z.lpar2.ip + ' ansible_user=' + env.z.lpar2.user + ' ansible_become_password=' + env.z.lpar2.pass ) if env.z.lpar2.hostname is defined else '' }}
-{{ (env.z.lpar3.hostname + ' ansible_host=' + env.z.lpar3.ip + ' ansible_user=' + env.z.lpar3.user + ' ansible_become_password=' + env.z.lpar3.pass ) if env.z.lpar3.hostname is defined else '' }}
+{% if ( installation_type | upper != 'ZVM' ) %}
+{{ '[kvm_host]' }}
+{{ ( env.z.lpar1.hostname | string + ' ansible_host=' + env.z.lpar1.ip | string + ' ansible_user=' + env.z.lpar1.user | string + ' ansible_become_password=' + env.z.lpar1.pass | string ) if env.z.lpar1.hostname is defined else '' }}
+{{ ( env.z.lpar2.hostname + ' ansible_host=' + env.z.lpar2.ip + ' ansible_user=' + env.z.lpar2.user + ' ansible_become_password=' + env.z.lpar2.pass ) if env.z.lpar2.hostname is defined else '' }}
+{{ ( env.z.lpar3.hostname + ' ansible_host=' + env.z.lpar3.ip + ' ansible_user=' + env.z.lpar3.user + ' ansible_become_password=' + env.z.lpar3.pass ) if env.z.lpar3.hostname is defined else '' }}
+{% else %}
+{{ '[zvm_host]' }}
+{% for item in range( zvm.nodes | length ) %}
+{{ zvm.nodes[item].name | string + ' ansible_host=' + zvm.nodes[item].interface.ip | string + ' ansible_user=' + zvm.nodes[item].user | string + ' ansible_become_password=' + zvm.nodes[item].password | string }}
+{% endfor %}
+{% endif %}
 
 [bastion]
 {{ env.bastion.networking.hostname }} ansible_host={{ env.bastion.networking.ip }} ansible_user={{ env.bastion.access.user }} ansible_become_password={{ env.bastion.access.pass }}
diff --git a/roles/setup_params/tasks/main.yaml b/roles/setup_params/tasks/main.yaml
new file mode 100644
index 00000000..ee9a2f02
--- /dev/null
+++ b/roles/setup_params/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Generate param files
+  template:
+    src: param-file.param.j2
+    dest: ~/{{ abi.ansible_workdir }}/{{ env.cluster.networking.metadata_name }}-{{ item }}.param
+  with_sequence: start=0 end={{ (zvm.nodes | length) - 1 }} stride=1
+  loop_control:
+    extended: yes
+    index_var: item
+
+- name: Copy rootfs.img
+  ansible.builtin.copy:
+    src: "~/{{ abi.ansible_workdir }}/boot-artifacts/agent.{{ ansible_architecture }}-rootfs.img"
+    dest: /var/www/html/rootfs.img
+    remote_src: yes
+
+- name: Copy initrd.img
+  ansible.builtin.copy:
+    src: "~/{{ abi.ansible_workdir }}/boot-artifacts/agent.{{ ansible_architecture }}-initrd.img"
+    dest: /var/lib/libvirt/images/pxeboot/initrd.img
+    remote_src: yes
+
+- name: Copy kernel.img
+  ansible.builtin.copy:
+    src: "~/{{ abi.ansible_workdir }}/boot-artifacts/agent.{{ ansible_architecture }}-kernel.img"
+    dest: /var/lib/libvirt/images/pxeboot/kernel.img
+    remote_src: yes
\ No newline at end of file
diff --git a/roles/setup_params/templates/param-file.param.j2 b/roles/setup_params/templates/param-file.param.j2
new file mode 100644
index 00000000..f56f32d3
--- /dev/null
+++ b/roles/setup_params/templates/param-file.param.j2
@@ -0,0 +1 @@
+rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/agent.{{ ansible_architecture }}-rootfs.img ip={{ zvm.nodes[item].interface.ip }}::{{ zvm.gateway }}:{{ zvm.subnetmask }}{% if zvm.network_mode | lower != 'roce' %}::{{ zvm.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ zvm.nameserver }} zfcp.allow_lun_scan=0 {% if zvm.network_mode | lower != 'roce' %}rd.znet={{ zvm.nodes[item].interface.nettype }},{{ zvm.nodes[item].interface.subchannels }},{{ zvm.nodes[item].interface.options }}{% endif %} {% if zvm.disk_type | lower != 'fcp' %}rd.dasd={{ zvm.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ zvm.nodes[item].lun[0].paths[0].fcp }},{{ zvm.nodes[item].lun[0].paths[0].wwpn }},{{ zvm.nodes[item].lun[0].id }}{% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
\ No newline at end of file
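
For reference, a filled-in `zvm.yaml` for a single dasd-backed z/VM node might look like the sketch below. This is a minimal illustration only: the guest name, host, subchannels, and disk id mirror the examples from `docs/set-variables-group-vars.md`, while the IP addresses, nameserver, and password are placeholders to be replaced with values from your own environment.

```yaml
zvm:
  network_mode: vswitch          # the only mode currently supported by this playbook
  disk_type: dasd
  subnetmask: 255.255.255.0
  gateway: 192.168.10.1
  nameserver: 192.168.10.1       # illustrative; use your DNS server
  vcpus: 4
  memory: 16384                  # in MB
  nodes:
    - name: m1317002
      host: boem1317
      user: m1317002
      password: password         # placeholder credential
      interface:
        ifname: encbdf0
        nettype: qeth
        subchannels: 0.0.bdf0,0.0.bdf1,0.0.bdf2
        options: layer2=1
        ip: 192.168.10.5         # illustrative node IP
      # Required because disk_type is dasd
      dasd:
        disk_id: 4404
```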