diff --git a/.ci/automation-schema.yaml b/.ci/automation-schema.yaml index ed3eae7e4..415e12a3c 100644 --- a/.ci/automation-schema.yaml +++ b/.ci/automation-schema.yaml @@ -15,8 +15,13 @@ _values: _hook: name: str() type: str() - source: str() + source: str(required=False) inventory: str(required=False) + extra_vars: map(required=False) + definition: map(required=False) + resource_name: str(required=False) + state: str(required=False) + kind: str(required=False) --- _stage: path: str() diff --git a/automation/vars/uni01alpha.yaml b/automation/vars/uni01alpha.yaml new file mode 100644 index 000000000..320929639 --- /dev/null +++ b/automation/vars/uni01alpha.yaml @@ -0,0 +1,63 @@ +--- +vas: + uni01alpha: + stages: + - path: examples/dt/uni01alpha/control-plane/nncp + wait_conditions: + - >- + oc -n openstack wait nncp + -l osp/nncm-config-type=standard + --for jsonpath='{.status.conditions[0].reason}'=SuccessfullyConfigured + --timeout=60s + values: + - name: network-values + src_file: values.yaml + build_output: nncp.yaml + + - pre_stage_run: + - name: Apply cinder-lvm label on master-0 + type: cr + definition: + metadata: + labels: + openstack.org/cinder-lvm: "" + kind: Node + resource_name: master-0 + state: patched + path: examples/dt/uni01alpha/control-plane + wait_conditions: + - >- + oc -n openstack wait openstackcontrolplane + controlplane + --for condition=Ready + --timeout=30m + values: + - name: network-values + src_file: nncp/values.yaml + - name: service-values + src_file: service-values.yaml + build_output: control-plane.yaml + + - path: examples/dt/uni01alpha/networker + wait_conditions: + - >- + oc -n openstack wait openstackdataplanedeployment + networker-deploy + --for condition=Ready + --timeout=1200s + values: + - name: edpm-values + src_file: values.yaml + build_output: edpm-networker.yaml + + - path: examples/dt/uni01alpha + wait_conditions: + - >- + oc -n openstack wait openstackdataplanedeployment + edpm-deployment + --for condition=Ready + 
--timeout=1200s + values: + - name: edpm-values + src_file: values.yaml + build_output: edpm.yaml diff --git a/dt/uni01alpha/README.md b/dt/uni01alpha/README.md new file mode 100644 index 000000000..d89f09682 --- /dev/null +++ b/dt/uni01alpha/README.md @@ -0,0 +1,11 @@ +# Deployed Topology - Alpha + +If you are looking for information on how to deploy the alpha based DT, then +please the [README](../../examples/dt/uni01alpha/README.md) in the examples +directory. + +This directory `dt/uni01alpha/`, exists so that the +[kustomization.yaml](../../examples/dt/uni01alpha/kustomization.yaml) in +the examples directory of uni01alpha topology, reference it by path as a +component. It's contents are likely uninteresting unless you want to understand +how kustomize was implemented in this repository. diff --git a/dt/uni01alpha/edpm/kustomization.yaml b/dt/uni01alpha/edpm/kustomization.yaml new file mode 100644 index 000000000..a0545c8e5 --- /dev/null +++ b/dt/uni01alpha/edpm/kustomization.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +transformers: + - |- + apiVersion: builtin + kind: NamespaceTransformer + metadata: + name: _ignored_ + namespace: openstack + setRoleBindingSubjects: none + unsetOnly: true + fieldSpecs: + - path: metadata/name + kind: Namespace + create: true + +components: + - ../../../lib/dataplane diff --git a/dt/uni01alpha/kustomization.yaml b/dt/uni01alpha/kustomization.yaml new file mode 100644 index 000000000..3352b20a0 --- /dev/null +++ b/dt/uni01alpha/kustomization.yaml @@ -0,0 +1,362 @@ +--- +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +transformers: + - |- + apiVersion: builtin + kind: NamespaceTransformer + metadata: + name: _ignored_ + namespace: openstack + setRoleBindingSubjects: none + unsetOnly: true + fieldSpecs: + - path: metadata/name + kind: Namespace + create: true + +components: + - ../../lib/networking/metallb + - ../../lib/networking/netconfig + - 
../../lib/networking/nad + - ../../lib/control-plane + +resources: + - ocp_networks_octavia_netattach.yaml + +replacements: + - source: + kind: ConfigMap + name: service-values + fieldPath: data.cinderVolumes.lvm-iscsi.replicas + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.cinder.template.cinderVolumes.lvm-iscsi.replicas + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.cinderVolumes.lvm-iscsi.nodeSelector.openstack\.org/cinder-lvm + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.cinder.template.cinderVolumes.lvm-iscsi.nodeSelector.openstack\.org/cinder-lvm + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.cinderVolumes.lvm-iscsi.customServiceConfig + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.cinder.template.cinderVolumes.lvm-iscsi.customServiceConfig + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.cinderBackup.customServiceConfig + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.cinder.template.cinderBackup.customServiceConfig + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.cinderBackup.replicas + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.cinder.template.cinderBackup.replicas + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.glance.default.replicas + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.glance.template.glanceAPIs.default.replicas + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.glance.customServiceConfig + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.glance.template.customServiceConfig + options: + create: true + + - source: + kind: ConfigMap + name: 
service-values + fieldPath: data.glance.default.replicas + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.glance.template.glanceAPIs.default.replicas + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.swift.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.swift.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.ironic.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.ironic.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.amphoraImageContainerImage + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.amphoraImageContainerImage + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.apacheContainerImage + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.apacheContainerImage + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.octaviaAPI.networkAttachments + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.template.octaviaAPI.networkAttachments + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.octaviaHousekeeping.networkAttachments + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.template.octaviaHousekeeping.networkAttachments + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.octaviaHealthManager.networkAttachments + targets: + - select: + kind: 
OpenStackControlPlane + fieldPaths: + - spec.octavia.template.octaviaHealthManager.networkAttachments + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.octavia.octaviaWorker.networkAttachments + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.octavia.template.octaviaWorker.networkAttachments + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.redis.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.redis.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.heat.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.heat.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.telemetry.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.telemetry.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.telemetry.metricStorage.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.telemetry.template.metricStorage.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.telemetry.autoscaling.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.telemetry.template.autoscaling.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.telemetry.ceilometer.enabled + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.telemetry.template.ceilometer.enabled + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.ovn.ovnController.availability-zones + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.ovn.template.ovnController.external-ids.availability-zones + options: 
+ create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.ovn.ovnController.nicMappings + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.ovn.template.ovnController.nicMappings + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.neutron.customServiceConfig + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.neutron.template.customServiceConfig + options: + create: true + + - source: + kind: ConfigMap + name: service-values + fieldPath: data.nova.schedulerServiceTemplate.customServiceConfig + targets: + - select: + kind: OpenStackControlPlane + fieldPaths: + - spec.nova.template.schedulerServiceTemplate.customServiceConfig + options: + create: true + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.net-attach-def + targets: + - select: + kind: NetworkAttachmentDefinition + name: octavia + fieldPaths: + - spec.config diff --git a/dt/uni01alpha/namespace.yaml b/dt/uni01alpha/namespace.yaml new file mode 100644 index 000000000..60a6e8c42 --- /dev/null +++ b/dt/uni01alpha/namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: builtin +kind: NamespaceTransformer +metadata: + name: _ignored_ + namespace: openstack +setRoleBindingSubjects: none +unsetOnly: true +fieldSpecs: + - path: metadata/name + kind: Namespace + create: true diff --git a/dt/uni01alpha/networker/kustomization.yaml b/dt/uni01alpha/networker/kustomization.yaml new file mode 100644 index 000000000..e4014eb54 --- /dev/null +++ b/dt/uni01alpha/networker/kustomization.yaml @@ -0,0 +1,55 @@ +--- +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +transformers: + - |- + apiVersion: builtin + kind: NamespaceTransformer + metadata: + name: _ignored_ + namespace: openstack + setRoleBindingSubjects: none + unsetOnly: true + fieldSpecs: + - path: metadata/name + kind: Namespace + create: true + +components: + - ../../../lib/dataplane + +patches: + - 
target: + kind: OpenStackDataPlaneDeployment + name: .* + patch: |- + - op: replace + path: /metadata/name + value: networker-deploy + + - target: + kind: OpenStackDataPlaneDeployment + name: .* + patch: |- + - op: replace + path: /spec/nodeSets + value: + - networker-nodes + + - target: + kind: OpenStackDataPlaneNodeSet + name: .* + patch: |- + - op: replace + path: /metadata/name + value: networker-nodes + + - target: + kind: Secret + name: nova-migration-ssh-key + patch: |- + - op: add + path: /metadata/annotations + value: + config.kubernetes.io/local-config: true diff --git a/dt/uni01alpha/nncp/kustomization.yaml b/dt/uni01alpha/nncp/kustomization.yaml new file mode 100644 index 000000000..da4d82899 --- /dev/null +++ b/dt/uni01alpha/nncp/kustomization.yaml @@ -0,0 +1,187 @@ +--- +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +transformers: + - |- + apiVersion: builtin + kind: NamespaceTransformer + metadata: + name: _ignored_ + namespace: openstack + setRoleBindingSubjects: none + unsetOnly: true + fieldSpecs: + - path: metadata/name + kind: Namespace + create: true + +components: + - ../../../lib/nncp + +patches: + - target: + kind: NodeNetworkConfigurationPolicy + name: master-0 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: _replaced_ + id: _replaced_ + + - target: + kind: NodeNetworkConfigurationPolicy + name: master-0 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia bridge + mtu: 1500 + name: octbr + type: linux-bridge + bridge: + options: + stp: + enabled: false + port: + - name: octavia + + - target: + kind: NodeNetworkConfigurationPolicy + name: master-1 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: _replaced_ + id: _replaced_ + 
+ - target: + kind: NodeNetworkConfigurationPolicy + name: master-1 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia bridge + mtu: 1500 + name: octbr + type: linux-bridge + bridge: + options: + stp: + enabled: false + port: + - name: octavia + + - target: + kind: NodeNetworkConfigurationPolicy + name: master-2 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: _replaced_ + id: _replaced_ + + - target: + kind: NodeNetworkConfigurationPolicy + name: master-2 + patch: |- + - op: add + path: /spec/desiredState/interfaces/- + value: + description: Octavia bridge + mtu: 1500 + name: octbr + type: linux-bridge + bridge: + options: + stp: + enabled: false + port: + - name: octavia + +replacements: + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.base_iface + targets: + - select: + kind: NodeNetworkConfigurationPolicy + name: master-0 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.base-iface + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.vlan + targets: + - select: + kind: NodeNetworkConfigurationPolicy + name: master-0 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.id + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.base_iface + targets: + - select: + kind: NodeNetworkConfigurationPolicy + name: master-1 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.base-iface + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.vlan + targets: + - select: + kind: NodeNetworkConfigurationPolicy + name: master-1 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.id + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.base_iface + targets: + - select: + kind: NodeNetworkConfigurationPolicy + 
name: master-2 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.base-iface + + - source: + kind: ConfigMap + name: network-values + fieldPath: data.octavia.vlan + targets: + - select: + kind: NodeNetworkConfigurationPolicy + name: master-2 + fieldPaths: + - spec.desiredState.interfaces.[name=octavia].vlan.id diff --git a/dt/uni01alpha/ocp_networks_octavia_netattach.yaml b/dt/uni01alpha/ocp_networks_octavia_netattach.yaml new file mode 100644 index 000000000..d59e60095 --- /dev/null +++ b/dt/uni01alpha/ocp_networks_octavia_netattach.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: octavia + labels: + osp/net: octavia + osp/net-attach-def-type: standard diff --git a/examples/dt/uni01alpha/.gitignore b/examples/dt/uni01alpha/.gitignore new file mode 100644 index 000000000..1ae5f072d --- /dev/null +++ b/examples/dt/uni01alpha/.gitignore @@ -0,0 +1 @@ +data-plane.yaml diff --git a/examples/dt/uni01alpha/README.md b/examples/dt/uni01alpha/README.md new file mode 100644 index 000000000..fe195a35c --- /dev/null +++ b/examples/dt/uni01alpha/README.md @@ -0,0 +1,317 @@ +# Deployed Topology - Alpha + +This document contains a list of integration test suites that would be +executed against the below specified topology of Red Hat OpenStack Services +on OpenShift. It also contains a collection of custom resources (CRs) for +deploying the test environment. + +## Purpose + +This topology is used for executing integration tests that evaluate the +`default` backends of the below mentioned services. 
+ +## Environment + +### Nodes + +| Role | Machine Type | Count | +| ----------------- | ------------ | ----- | +| Compact OpenShift | vm | 3 | +| OpenStack Compute | vm | 2 | +| Networker | vm | 3 | +| Test nodes | vm | 2 | + +### Networks + +| Name | Type | Interface | CIDR | +| ------------ | -------- | --------- | --------------- | +| Provisioning | untagged | nic1 | 172.22.0.0/24 | +| Machine | untagged | nic2 | 192.168.32.0/20 | +| RH OSP | trunk | nic3 | | + +#### VLAN networks in RH OSP + +| Name | Type | CIDR | +| ----------- | ----------- | ----------------- | +| ctlplane | untagged | 192.168.122.0/24 | +| internalapi | VLAN tagged | 172.17.0.0/24 | +| storage | VLAN tagged | 172.18.0.0/24 | +| tenant | VLAN tagged | 172.19.0.0/24 | +| octavia | VLAN tagged | 172.20.0.0/24 | + +### Services, enabled features and configurations + +| Service | configuration | Lock-in coverage? | +| ---------------- | ---------------- | ------------------ | +| Cinder | LVM/iSCSI/lioadm | Must have | +| Cinder Backup | Swift | Must have | +| Glance | Swift | Must have | +| Swift | (default) | Must have | +| Octavia | (amphora) | Must have | +| Horizon | N/A | Must have | +| Barbican | (default) | Must have | +| Telemetry | | Must have | +| Ironic | | Must have | +| Neutron | OVN - AZs | Must have | + +#### Support services + +The following table lists services which are not the main focus of the testing +(which may be covered by additional scenarios), but are required for the DT to +work properly and can be deployed with any/default configuration. 
+ +| Service | Reason | +| ---------------- |--------------------------- | +| Nova | needed by scenario testing | +| Keystone | needed by all services | +| Ceilometer | needed by Telemetry | +| Heat | needed by Telemetry | +| Prometheus | needed by Telemetry | +| Redis | needed by Octavia | + +### Additional configuration + +- Always-on, default services and features: TLSe +- Two additional fake baremetal nodes +- Availability zones for OVN (zone-1 & zone-2) +- Logical volume with the name cinder-volumes exists on a OpenShift node. +- iSCSI service is enabled on all OpenShift nodes. +- Multipath service is enabled on all OpenShift nodes. +- Cluster Observability Operator is installed on the platform. + +#### iSCSI + +It is assumed *iSCSI* services are enabled in all nodes participating in the +Red Hat OpenShift cluster. If not, a `MachineConfig` similar to the below one +is applied. The node would be *rebooted* after applying the configuration. + +```YAML +--- +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + service: cinder + name: 90-master-cinder-enable-iscsid +spec: + config: + ignition: + version: 3.2.0 + systemd: + units: + - enabled: true + name: iscsid.service +``` + +#### Multipath + +It is assumed *multipath* services are enabled in all nodes particpating in the +Red Hat OpenShift cluster. If not, a `MachineConfig` like the one below must be +applied. The node would be *rebooted* after applying the configuration. 
+ +```YAML +--- +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + service: cinder + name: 91-master-cinder-enable-multipathd +spec: + config: + ignition: + version: 3.2.0 + storage: + files: + - path: /etc/multipath.conf + overwrite: false + # Mode must be decimal, this is 0600 + mode: 384 + user: + name: root + group: + name: root + contents: + # Source can be a http, https, tftp, s3, gs, or data as defined in rfc2397. + # This is the rfc2397 text/plain string format + source: data:,defaults%20%7B%0A%20%20user_friendly_names%20no%0A%20%20recheck_wwid%20yes%0A%20%20skip_kpartx%20yes%0A%20%20find_multipaths%20yes%0A%7D%0A%0Ablacklist%20%7B%0A%7D + systemd: + units: + - enabled: true + name: multipathd.service +``` + +The plain text contents of the multipath configuration file is + +```conf +defaults { + user_friendly_names no + recheck_wwid yes + skip_kpartx yes + find_multipaths yes +} +blacklist { +} +``` + +#### Cinder backend - LVM + +It is assumed that worker nodes or the master nodes have extra disks and there +exists a logical volume with the name *cinder_volumes*. If not, a +`MachineConfig` like the one below must be applied. + +```YAML +--- +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + name: 92-create-logical-volume + labels: + machineconfiguration.openshift.io/role: master + service: cinder +spec: + config: + ignition: + version: 3.2.0 + storage: + disks: + - device: "/dev/sdb" + wipeTable: true + - device: "/dev/sdc" + wipeTable: true + files: + - path: /usr/local/bin/lv-cinder-volumes.sh + overwrite: false + mode: 493 + user: + name: root + group: + name: root + contents: + source: data:text/plain;charset=utf-8;base64,{{ _replaced_ }} + systemd: + units: + - name: lv-cinder-volumes.service + enabled: true + contents: | + [Unit] + Description=Create logical volume with name cinder-volumes. 
+ After=var.mount systemd-udev-settle.service + + [Service] + Type=oneshot + ExecStart=/usr/local/bin/lv-cinder-volumes.sh + RemainAfterExit=yes + + [Install] + WantedBy=multi-user.target +``` + +*{{ _replaced_ }}* should be replaced with the base64 encoding of the below script. + +```bash +#! /usr/bin/bash +set -euo pipefail + +if [[ $(vgdisplay cinder-volumes) ]]; then + echo "cinder-volumes vg exists." + exit 0 +fi + +disks=$(lsblk -o NAME,TYPE | awk '{ if ($2 == "disk" && $1 != "sda") print "/dev/"$1}') +disk_str='' + +for disk in ${disks}; do + pvcreate ${disk} + disk_str="${disk_str} ${disk}" +done + +vgcreate cinder-volumes ${disk_str} +``` + +##### Notes + +The LVM backend for Cinder is a special case as the storage data is on the +OpenShift node and has no external storage systems. This has several +implications + +- To prevent issues with exported volumes, cinder-operator automatically uses + the host network. The backend is configured to use the host's VLAN IP + address. This means that the cinder-volume service doesn't need any + networkAttachments. +- There can only be one node with the label `openstack.org/cinder-lvm=`. Apply + the label using the command + `oc label node openstack.org/cinder-lvm=` + +### Octavia + +Octavia is enabled with the appropriate network attachments configured to +deploy Octavia. It manages amphorae VMs through a self-service tenant network. +The Octavia Amphora controllers get access to it through a Neutron externally +routed flat provider network configured as a SNAT-less gateway for a neutron +router linked to the tenant networks. Host routes on the tenant network's +subnet and routes on the network attachment provide the required `next hop` +routing to establish the necessary bidirectional routing. + +This arrangement requires a network attachment for connecting the OVN and +Amphora Controller pods (octavia-housekeeping, octavia-healthmanager, +octavia-worker). 
Because Neutron ML2/OVN implements provider networks by +bridging the relevant physical interface - in this case the network-attachment, +there is an additional requirement that this attachment function when +bridged. As the default macvlan attachments do not function when bridged, a +bridge network attachment is used. + +Bridge attachments do not directly provide connectivity outside of the OCP +node. To implement this, the NodeNetworkConfigurationPolicy creates an VLAN +interface as is typical for the other networks, but does not configure an IP +pool as it is not needed. It is also not configured for metallb as it is solely +as part of a way to establish a L2 network link between nodes. The +NodeNetworkConfigurationPolicy also configures an octbr linux bridge which is +configured as the bridge for the network attachment mentioned above. It is also +configured to add the VLAN interface as a port, effectively linking the nodes +and the network attachments. + +```YAML +spec: + octavia: + enabled: true + template: + octaviaAPI: + networkAttachments: + - internalapi + octaviaHousekeeping: + networkAttachments: + - octavia + octaviaWorker: + networkAttachments: + - octavia + octaviaHealthManager: + networkAttachments: + - octavia + + ovn: + template: + ovncontroller: + nicMappings: + datacentre: ospbr + octavia: octbr +``` + +## Testing + +| Test framework | When to run | Special configuration | +| ---------------- | -------------------- | ----------------------| +| relevant volume tests | tempest stage | | +| relevant image tests | tempest stage | | +| relevant object-storage tests | tempest stage | | +| relevant octavia tests | tempest stage | | +| horizon integration | own stage (post-tempest)| | + +## Workflow + +1. [Install the OpenStack K8S operators and their dependencies](../../common/README.md) +2. [Configure and deploy the OpenStack control plane](control-plane.md) +3. [Configure and deploy the OpenStack networker deployment](networker.md) +4. 
[Configure and deploy the OpenStack data plane](data-plane.md) diff --git a/examples/dt/uni01alpha/control-plane.md b/examples/dt/uni01alpha/control-plane.md new file mode 100644 index 000000000..ffafe5ea2 --- /dev/null +++ b/examples/dt/uni01alpha/control-plane.md @@ -0,0 +1,88 @@ +# Configuring networking and deploy the OpenStack control plane + +## Assumptions + +- A storage class called `local-storage` should already exist. +- Cluster observability operator is already deployed. If not, follow the + steps found [below](#cluster-observability-operator). + +### Cluster observability operator + +Cluster Observability Operator must be installed as it is required by OpenStack +Telemetry operator. If not installed, the below steps can be followed + +```bash +cat > subscription.yaml << EOF +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: observability-operator + namespace: openshift-operators + labels: + operators.coreos.com/observability-operator.openshift-operators: "" +spec: + channel: development + installPlanApproval: Automatic + name: observability-operator + source: redhat-operators + sourceNamespace: openshift-marketplace +EOF + +# Apply the cr +oc apply -f subscription.yaml + +# Wait for the deployment to be ready +oc wait deployments/observability-operator --for condition=Available \ + --timeout=300s +``` + +## Initialize + +Switch to the "openstack" namespace + +```bash +oc project openstack +``` + +Change to the uni01alpha directory + +```bash +cd architecture/examples/dt/uni01alpha +``` + +Edit [service-values.yaml](service-values.yaml) and +[nncp/values.yaml](nncp/values.yaml). + +Apply node network configuration + +```bash +pushd control-plane/nncp +kustomize build > nncp.yaml +oc apply -f nncp.yaml +oc wait nncp \ + -l osp/nncm-config-type=standard \ + --for jsonpath='{.status.conditions[0].reason}'=SuccessfullyConfigured \ + --timeout=300s +popd +``` + +Generate the control-plane and networking CRs. 
+ +```bash +pushd control-plane +kustomize build > control-plane.yaml +``` + +## Create CRs + +```bash +oc apply -f control-plane.yaml +popd +``` + +Wait for control plane to be available + +```bash +oc wait osctlplane controlplane --for condition=Ready --timeout=600s +``` diff --git a/examples/dt/uni01alpha/control-plane/.gitignore b/examples/dt/uni01alpha/control-plane/.gitignore new file mode 100644 index 000000000..3df8f53be --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/.gitignore @@ -0,0 +1 @@ +control-plane.yaml diff --git a/examples/dt/uni01alpha/control-plane/kustomization.yaml b/examples/dt/uni01alpha/control-plane/kustomization.yaml new file mode 100644 index 000000000..ab04064ff --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +components: + - ../../../../dt/uni01alpha + +resources: + - nncp/values.yaml + - service-values.yaml diff --git a/examples/dt/uni01alpha/control-plane/nncp/.gitignore b/examples/dt/uni01alpha/control-plane/nncp/.gitignore new file mode 100644 index 000000000..51ed8a956 --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/nncp/.gitignore @@ -0,0 +1 @@ +nncp.yaml diff --git a/examples/dt/uni01alpha/control-plane/nncp/kustomization.yaml b/examples/dt/uni01alpha/control-plane/nncp/kustomization.yaml new file mode 100644 index 000000000..26039ef8c --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/nncp/kustomization.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +components: + - ../../../../../dt/uni01alpha/nncp + +resources: + - values.yaml diff --git a/examples/dt/uni01alpha/control-plane/nncp/values.yaml b/examples/dt/uni01alpha/control-plane/nncp/values.yaml new file mode 100644 index 000000000..a1959d0a8 --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/nncp/values.yaml @@ -0,0 +1,226 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: network-values + annotations: + config.kubernetes.io/local-config: "true" + +data: + openstack-operator-image: "quay.io/openstack-k8s-operators/openstack-operator-index:latest" + + node_0: + name: master-0 + internalapi_ip: 172.17.0.5 + tenant_ip: 172.19.0.5 + ctlplane_ip: 192.168.122.10 + storage_ip: 172.18.0.5 + node_1: + name: master-1 + internalapi_ip: 172.17.0.6 + tenant_ip: 172.19.0.6 + ctlplane_ip: 192.168.122.11 + storage_ip: 172.18.0.6 + node_2: + name: master-2 + internalapi_ip: 172.17.0.7 + tenant_ip: 172.19.0.7 + ctlplane_ip: 192.168.122.12 + storage_ip: 172.18.0.7 + + ctlplane: + dnsDomain: ctlplane.openstack.lab + subnets: + - allocationRanges: + - end: 192.168.122.120 + start: 192.168.122.100 + - end: 192.168.122.200 + start: 192.168.122.150 + cidr: 192.168.122.0/24 + gateway: 192.168.122.1 + name: subnet1 + prefix-length: 24 + iface: enp6s0 + mtu: 9000 + lb_addresses: + - 192.168.122.80-192.168.122.90 + endpoint_annotations: + metallb.universe.tf/address-pool: ctlplane + metallb.universe.tf/allow-shared-ip: ctlplane + metallb.universe.tf/loadBalancerIPs: 192.168.122.80 + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "ctlplane", + "type": "macvlan", + "master": "ospbr", + "ipam": { + "type": "whereabouts", + "range": "192.168.122.0/24", + "range_start": "192.168.122.30", + "range_end": "192.168.122.70" + } + } + + internalapi: + dnsDomain: internalapi.openstack.lab + subnets: + - allocationRanges: + - end: 172.17.0.250 + start: 172.17.0.100 + cidr: 172.17.0.0/24 + name: subnet1 + vlan: 20 + mtu: 1500 + prefix-length: 24 + iface: internalapi + vlan: 20 + base_iface: enp6s0 + lb_addresses: + - 172.17.0.80-172.17.0.90 + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "internalapi", + "type": "macvlan", + "master": "internalapi", + "ipam": { + "type": 
"whereabouts", + "range": "172.17.0.0/24", + "range_start": "172.17.0.30", + "range_end": "172.17.0.70" + } + } + + storage: + dnsDomain: storage.openstack.lab + subnets: + - allocationRanges: + - end: 172.18.0.250 + start: 172.18.0.100 + cidr: 172.18.0.0/24 + name: subnet1 + vlan: 21 + mtu: 9000 + prefix-length: 24 + iface: storage + vlan: 21 + base_iface: enp6s0 + lb_addresses: + - 172.18.0.80-172.18.0.90 + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "storage", + "type": "macvlan", + "master": "storage", + "ipam": { + "type": "whereabouts", + "range": "172.18.0.0/24", + "range_start": "172.18.0.30", + "range_end": "172.18.0.70" + } + } + + tenant: + dnsDomain: tenant.openstack.lab + subnets: + - allocationRanges: + - end: 172.19.0.250 + start: 172.19.0.100 + cidr: 172.19.0.0/24 + name: subnet1 + vlan: 22 + mtu: 1500 + prefix-length: 24 + iface: tenant + vlan: 22 + base_iface: enp6s0 + lb_addresses: + - 172.19.0.80-172.19.0.90 + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "tenant", + "type": "macvlan", + "master": "tenant", + "ipam": { + "type": "whereabouts", + "range": "172.19.0.0/24", + "range_start": "172.19.0.30", + "range_end": "172.19.0.70" + } + } + + octavia: + dnsDomain: octavia.openstack.lab + mtu: 1500 + vlan: 23 + base_iface: enp6s0 + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "octavia", + "type": "bridge", + "bridge": "octbr", + "ipam": { + "type": "whereabouts", + "range": "172.20.0.0/24", + "range_start": "172.20.0.30", + "range_end": "172.20.0.70" + } + } + + external: + dnsDomain: external.openstack.lab + subnets: + - allocationRanges: + - end: 192.168.122.250 + start: 192.168.122.201 + cidr: 192.168.122.0/24 + gateway: 192.168.122.1 + name: subnet1 + mtu: 1500 + + datacentre: + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "datacentre", + "type": "bridge", + "bridge": "ospbr", + "ipam": {} + } + + dns-resolver: + config: + server: + - 192.168.122.1 + search: [] + options: + - key: server + 
values: + - 192.168.122.1 + + routes: + config: + - destination: 192.168.122.0/24 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.85 + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.86 + + lbServiceType: LoadBalancer + storageClass: host-nfs-storageclass + bridgeName: ospbr diff --git a/examples/dt/uni01alpha/control-plane/service-values.yaml b/examples/dt/uni01alpha/control-plane/service-values.yaml new file mode 100644 index 000000000..8338811b7 --- /dev/null +++ b/examples/dt/uni01alpha/control-plane/service-values.yaml @@ -0,0 +1,145 @@ +--- +apiVersion: v1 +kind: ConfigMap + +metadata: + name: service-values + annotations: + config.kubernetes.io/local-config: "true" + +data: + cinderVolumes: + lvm-iscsi: + replicas: 1 + nodeSelector: + openstack.org/cinder-lvm: "" + customServiceConfig: | + [lvm] + image_volume_cache_enabled = false + volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver + volume_group = cinder-volumes + target_protocol = iscsi + target_helper = lioadm + volume_backend_name = lvm_iscsi + target_ip_address=172.18.0.5 + target_secondary_ip_addresses = 172.19.0.5 + + cinderBackup: + customServiceConfig: | + [DEFAULT] + backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver + replicas: 1 + + glance: + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:swift + + [glance_store] + default_backend = default_backend + + [default_backend] + swift_store_create_container_on_put = True + swift_store_auth_version = 3 + swift_store_auth_address = {{ .KeystoneInternalURL }} + swift_store_endpoint_type = internalURL + swift_store_user = service:glance + swift_store_key = {{ .ServicePassword }} + default: + replicas: 1 + + ironic: + enabled: true + + swift: + enabled: true + + # ToDo: octavia 
needs to be enabled once the issue is fixed. + octavia: + enabled: false + amphoraImageContainerImage: quay.io/gthiemonge/octavia-amphora-image + apacheContainerImage: registry.redhat.io/ubi9/httpd-24:latest + octaviaAPI: + networkAttachments: + - internalapi + octaviaHousekeeping: + networkAttachments: + - octavia + octaviaHealthManager: + networkAttachments: + - octavia + octaviaWorker: + networkAttachments: + - octavia + + heat: + enabled: true + + redis: + enabled: true + + telemetry: + enabled: true + metricStorage: + enabled: true + autoscaling: + enabled: true + ceilometer: + enabled: true + + ovn: + ovnController: + nicMappings: + datacentre: ocpbr + octavia: octbr + availability-zones: + - zone-1 + + neutron: + customServiceConfig: | + [DEFAULT] + vlan_transparent = true + agent_down_time = 600 + router_distributed = true + router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler + allow_automatic_l3agent_failover = true + debug = true + default_availability_zones = zone-1,zone-2 + + [agent] + report_interval = 300 + + [database] + max_retries = -1 + db_max_retries = -1 + + [keystone_authtoken] + region_name = regionOne + memcache_use_advanced_pool = True + + [oslo_messaging_notifications] + driver = noop + + [oslo_middleware] + enable_proxy_headers_parsing = true + + [oslo_policy] + policy_file = /etc/neutron/policy.yaml + + [ovs] + igmp_snooping_enable = true + + [ovn] + ovsdb_probe_interval = 60000 + ovn_emit_need_to_frag = true + + [ml2] + type_drivers = geneve,vxlan,vlan,flat + tenant_network_types = geneve,flat + + # yamllint disable rule:line-length + nova: + schedulerServiceTemplate: + customServiceConfig: | + [filter_scheduler] + enabled_filters = AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,DifferentHostFilter,SameHostFilter,AllHostsFilter,ServerGroupAffinityFilter,ServerGroupAntiAffinityFilter diff --git a/examples/dt/uni01alpha/data-plane.md 
b/examples/dt/uni01alpha/data-plane.md new file mode 100644 index 000000000..10506533f --- /dev/null +++ b/examples/dt/uni01alpha/data-plane.md @@ -0,0 +1,62 @@ +# Deploying the OpenStack dataplane + +## Assumptions + +- The [control plane](control-plane.md) has been successfully deployed. + +## Initialize + +Switch to the "openstack" namespace + +```bash +oc project openstack +``` + +Change to the alpha's directory + +```bash +cd architecture/examples/dt/uni01alpha +``` + +Modify the [values.yaml](values.yaml) with the following information + +- SSH keys to be used for accessing the deployed compute nodes. +- SSH keys to be used for Nova migration. + +> All values must be in base64 encoded format. + +### Compute access + +1. Set `data['authorized']` with the value of all OpenStack Compute host SSH + keys. +2. Set `data['private']` with the contents of the SSH private key to be used + for accessing the dataplane compute nodes. +3. Set `data['public']` with the contents of the SSH public key used for + accessing the dataplane compute nodes. + +### Nova migration + +1. Set `data['nova']['migration']['ssh_keys']['private']` with the content of + the SSH private key to be used for potential future migration. +2. Set `data['nova']['migration']['ssh_keys']['public']` with the content of + the SSH public key to be used for potential future migration. + +## CRs + +Generate the dataplane CRs. 
+ +```bash +kustomize build > data-plane.yaml +``` + +## Create CRs + +```bash +oc apply -f data-plane.yaml +``` + +Wait for dataplane deployment to finish + +```bash +oc wait osdpd edpm-deployment --for condition=Ready --timeout=1200s +``` diff --git a/examples/dt/uni01alpha/kustomization.yaml b/examples/dt/uni01alpha/kustomization.yaml new file mode 100644 index 000000000..49abcaba5 --- /dev/null +++ b/examples/dt/uni01alpha/kustomization.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +components: + - ../../../dt/uni01alpha/edpm + +resources: + - values.yaml diff --git a/examples/dt/uni01alpha/networker.md b/examples/dt/uni01alpha/networker.md new file mode 100644 index 000000000..ec818e577 --- /dev/null +++ b/examples/dt/uni01alpha/networker.md @@ -0,0 +1,37 @@ +# Configuring and deploying Networker nodes + +## Assumptions + +- The [control plane](control-plane.md) has been created and successfully + deployed. + +## Steps + +```bash +# 1. Switch to openstack namespace + + $ oc project openstack + +# 2. Change the working directory to uni-alpha + + $ pushd architecture/examples/dt/uni01alpha + +# 3. Modify [networker/values.yaml](networker/values.yaml) file to suit your +# environment. + + $ pushd networker + $ vi values.yaml + +# 4. Generate the networker data plane deployment plan. + + $ kustomize build > edpm-networker.yaml + +# 5. Create the CRs + + $ oc apply -f edpm-networker.yaml + +# 6. 
Wait for Networker data plane deployment to complete + + $ oc wait osdpd networker-deploy --for condition=Ready --timeout=1200s + $ popd +``` diff --git a/examples/dt/uni01alpha/networker/.gitignore b/examples/dt/uni01alpha/networker/.gitignore new file mode 100644 index 000000000..e1774a409 --- /dev/null +++ b/examples/dt/uni01alpha/networker/.gitignore @@ -0,0 +1 @@ +edpm-networker.yaml diff --git a/examples/dt/uni01alpha/networker/kustomization.yaml b/examples/dt/uni01alpha/networker/kustomization.yaml new file mode 100644 index 000000000..11a02f232 --- /dev/null +++ b/examples/dt/uni01alpha/networker/kustomization.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +components: + - ../../../../dt/uni01alpha/networker + +resources: + - values.yaml diff --git a/examples/dt/uni01alpha/networker/values.yaml b/examples/dt/uni01alpha/networker/values.yaml new file mode 100644 index 000000000..e87eb4dfc --- /dev/null +++ b/examples/dt/uni01alpha/networker/values.yaml @@ -0,0 +1,147 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: edpm-values + annotations: + config.kubernetes.io/local-config: "true" + +data: + ssh_keys: + authorized: CHANGEME + private: CHANGEME2 + public: CHANGEME3 + + # Do not remove the nova key. It allows us to reuse the dataplane component. 
+ nova: + migration: + ssh_keys: + private: PLACEHOLDER + public: PLACEHOLDER + + nodeset: + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + + gather_facts: false + enable_debug: false + + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + + edpm_network_config_hide_sensitive_logs: false + + edpm_enable_chassis_gw: true + edpm_ovn_availability_zones: + - zone-2 + + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: + - '192.168.122.0/24' + + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: >- + {{ + lookup('vars', networks_lower[network] ~ '_ip') + }}/{{ + lookup('vars', networks_lower[network] ~ '_cidr') + }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + nodes: + edpm-networker-0: 
+ hostName: edpm-networker-0 + ansible: + ansibleHost: 192.168.122.105 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.105 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-networker-1: + hostName: edpm-networker-1 + ansible: + ansibleHost: 192.168.122.106 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.106 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-networker-2: + hostName: edpm-networker-2 + ansible: + ansibleHost: 192.168.122.107 + networks: + - name: ctlplane + subnetName: subnet1 + defaultRoute: true + fixedIP: 192.168.122.107 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + services: + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata diff --git a/examples/dt/uni01alpha/values.yaml b/examples/dt/uni01alpha/values.yaml new file mode 100644 index 000000000..d54efd31f --- /dev/null +++ b/examples/dt/uni01alpha/values.yaml @@ -0,0 +1,140 @@ +--- +apiVersion: v1 +kind: ConfigMap + +metadata: + name: edpm-values + annotations: + config.kubernetes.io/local-config: "true" + +data: + ssh_keys: + authorized: _replaced_ + private: _replaced_ + public: _replaced_ + + nova: + migration: + ssh_keys: + private: _replaced_ + public: _replaced_ + + nodeset: + ansible: + ansibleUser: cloud-admin + ansiblePort: 22 + ansibleVars: + service_net_map: + nova_api_network: internalapi + nova_libvirt_network: internalapi + timesync_ntp_servers: + - hostname: clock.redhat.com + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set 
min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: >- + {{ + lookup('vars', networks_lower[network] ~ '_ip') + }}/{{ + lookup('vars', networks_lower[network] ~ '_cidr') + }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + + edpm_selinux_mode: enforcing + edpm_sshd_configure_firewall: true + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + + enable_debug: false + gather_facts: false + + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + nodes: + edpm-compute-0: + ansible: + ansibleHost: 192.168.122.100 + hostName: edpm-compute-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + edpm-compute-1: + ansible: + ansibleHost: 192.168.122.101 + hostName: edpm-compute-1 + networks: + - defaultRoute: true + fixedIP: 192.168.122.101 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + 
subnetName: subnet1 + - name: tenant + subnetName: subnet1 + + services: + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + - telemetry diff --git a/lib/control-plane/kustomization.yaml b/lib/control-plane/kustomization.yaml index d8a024fa4..cc2c71681 100644 --- a/lib/control-plane/kustomization.yaml +++ b/lib/control-plane/kustomization.yaml @@ -10,6 +10,19 @@ secretGenerator: options: disableNameSuffixHash: true + - name: libvirt-secret + behavior: create + literals: + - LibvirtPassword=12345678 + options: + disableNameSuffixHash: true + + - name: octavia-ca-passphrase + literals: + - server-ca-passphrase=12345678 + options: + disableNameSuffixHash: true + resources: - openstackcontrolplane.yaml diff --git a/lib/control-plane/openstackcontrolplane.yaml b/lib/control-plane/openstackcontrolplane.yaml index 99dee0fbe..bba5572d6 100644 --- a/lib/control-plane/openstackcontrolplane.yaml +++ b/lib/control-plane/openstackcontrolplane.yaml @@ -137,6 +137,9 @@ spec: databaseInstance: openstack octaviaAPI: replicas: 1 + octaviaHousekeeping: {} + octaviaWorker: {} + octaviaHealthManager: {} secret: osp-secret ovn: template: @@ -182,3 +185,31 @@ spec: ringReplicas: 1 swiftStorage: replicas: 1 + telemetry: + enabled: false + template: + metricStorage: + enabled: false + monitoringStack: + alertingEnabled: true + scrapeInterval: 30s + storage: + strategy: persistent + retention: 24h + persistent: + pvcStorageRequest: 10Gi + pvcStorageClass: local-storage + autoscaling: + enabled: false + aodh: + passwordSelectors: + databaseInstance: openstack + memcachedInstance: memcached + secret: osp-secret + heatInstance: heat + ceilometer: + enabled: false + secret: osp-secret + logging: + enabled: false + port: 10514 diff --git a/lib/control-plane/osp-secrets.env b/lib/control-plane/osp-secrets.env index 5e67fa367..3a9d161e6 100644 --- 
a/lib/control-plane/osp-secrets.env +++ b/lib/control-plane/osp-secrets.env @@ -1,4 +1,8 @@ AdminPassword=12345678 +AodhDatabasePassword=12345678 +AodhPassword=12345678 +BarbicanDatabasePassword=12345678 +BarbicanPassword=12345678 CeilometerPassword=12345678 CinderDatabasePassword=12345678 CinderPassword=12345678 @@ -26,6 +30,7 @@ NovaCell0DatabasePassword=12345678 NovaCell1DatabasePassword=12345678 NovaPassword=12345678 OctaviaDatabasePassword=12345678 +OctaviaHeartbeatKey=12345678 OctaviaPassword=12345678 PlacementDatabasePassword=12345678 PlacementPassword=12345678