Added support for NS autoloadbalancing #4161

Open · wants to merge 2 commits into base: master
suites/reef/nvmeof/tier-2_nvmeof_4nodes_gateway_ha_tests.yaml (22 changes: 11 additions & 11 deletions)
@@ -126,7 +126,7 @@ tests:

  # NVMe 4-GW HA Test with mTLS configuration
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -184,7 +184,7 @@ tests:

  # NVMe 4-GW HA Test with mTLS-to-Non-mTLS switch configuration
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -242,7 +242,7 @@ tests:
  # Non-mTLS Tests
  # NVMe 4-GW Single node failure(s)
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -298,7 +298,7 @@ tests:
      polarion-id: CEPH-83589016

  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -385,7 +385,7 @@ tests:

  # 4GW HA Single-sub multinode Failover and failback parallely via ceph orchestrator daemon
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -446,7 +446,7 @@ tests:

  # 4GW Multi node sequential failover-failback
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -507,7 +507,7 @@ tests:

  # 4GW HA 2-subsystems multinode Failover and failback parallely
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -578,7 +578,7 @@ tests:

  # 4GW HA 4-subsystems multinode Failover and failback parallely
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -669,7 +669,7 @@ tests:

  # 4GW HA 4-subsystems multinode Failover and failback parallely
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -762,7 +762,7 @@ tests:

  # 4GW HA 4-subsystems node Failover and failback using power off|on
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -851,7 +851,7 @@ tests:

  # 4GW HA 4-subsystems node Failover and failback using maintanence_mode
  - test:
-     abort-on-fail: true
+     abort-on-fail: false
      config:
        rbd_pool: rbd
        do_not_create_image: true
@@ -5,12 +5,12 @@
 tests:
  # Set up the cluster
  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      module: install_prereq.py
      name: install ceph pre-requisites

  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      config:
        verify_cluster_health: true
        steps:
@@ -51,7 +51,7 @@ tests:
      name: deploy cluster

  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      config:
        command: add
        id: client.1
@@ -5,11 +5,11 @@
 tests:
  # Set up the cluster
  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      module: install_prereq.py
      name: install ceph pre-requisites
  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      config:
        verify_cluster_health: true
        steps:
@@ -50,7 +50,7 @@ tests:
      name: deploy cluster

  - test:
-     abort-on-fail: false
+     abort-on-fail: true
      config:
        command: add
        id: client.1
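
The flag flips above follow one pattern: the long HA scenarios in tier-2_nvmeof_4nodes_gateway_ha_tests.yaml now carry abort-on-fail: false so one failed scenario no longer stops the rest of the suite, while the cluster-deployment and client-setup steps switch to abort-on-fail: true so a run stops early when there is no usable cluster to test against. As a rough illustration of that semantic only (not cephci's actual runner code; the callable and field handling are assumptions), a suite loop honouring the flag could look like this:

# Hypothetical sketch of how a test runner could honour the abort-on-fail flag.
# Names and structure are illustrative assumptions, not cephci's actual code.
from typing import Any, Callable, Dict, List


def run_suite(
    tests: List[Dict[str, Any]],
    run_module: Callable[[str, Dict[str, Any]], int],
) -> int:
    """Run every `- test:` entry; a failure stops the whole run only when
    that entry is marked abort-on-fail: true (e.g. cluster/client setup)."""
    failures = 0
    for entry in tests:
        test = entry["test"]
        rc = run_module(test["module"], test.get("config", {}))
        if rc != 0:
            failures += 1
            if test.get("abort-on-fail", False):
                # Setup steps abort the run: nothing downstream can pass
                # without a deployed cluster and a configured client.
                break
            # HA scenarios with abort-on-fail: false only record the
            # failure so the remaining scenarios still execute.
    return failures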
@@ -0,0 +1,203 @@
# Ceph-NVMeoF scaledown with a single gatewaygroup for n-1 node failures
# cluster configuration file: conf/squid/nvmeof/ceph_nvmeof_4-nvmeof-gwgroup_2gw_cluster.yaml
# inventory: conf/inventory/rhel-9.3-server-x86_64-xlarge.yaml

tests:
  # Set up the cluster
  - test:
      abort-on-fail: true
      module: install_prereq.py
      name: install ceph pre-requisites
  - test:
      abort-on-fail: true
      config:
        verify_cluster_health: true
        steps:
          - config:
              command: bootstrap
              service: cephadm
              args:
                mon-ip: node1
                registry-url: registry.redhat.io
                allow-fqdn-hostname: true
                log-to-file: true
          - config:
              command: add_hosts
              service: host
              args:
                attach_ip_address: true
                labels: apply-all-labels
          - config:
              command: apply
              service: mgr
              args:
                placement:
                  label: mgr
          - config:
              command: apply
              service: mon
              args:
                placement:
                  label: mon
          - config:
              command: apply
              service: osd
              args:
                all-available-devices: true
      desc: RHCS cluster deployment using cephadm
      destroy-cluster: false
      module: test_cephadm.py
      name: deploy cluster

  - test:
      abort-on-fail: true
      config:
        command: add
        id: client.1
        nodes:
          - node14
        install_packages:
          - ceph-common
        copy_admin_keyring: true
      desc: Setup client on NVMEoF gateway
      destroy-cluster: false
      module: test_client.py
      name: configure Ceph client for NVMe tests
      polarion-id: CEPH-83573758

  # 1 GWgroup 4GW 4-subsystems scaledown 2 nodes -> scaleup 2 nodes
  - test:
      abort-on-fail: false
      config:
        rbd_pool: rbd2
        gw_group: gw_group1
        do_not_create_image: true
        rep-pool-only: true
        rep_pool_config:
          pool: rbd2
        install: true  # Run SPDK with all pre-requisites
        cleanup:
          - pool
          - gateway
          - initiators
        gw_nodes:
          - node6
          - node7
          - node8
          - node9
        subsystems:  # Configure subsystems with all sub-entities
          - nqn: nqn.2016-06.io.spdk:cnode1
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode2
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode3
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode4
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9]
            allow_host: "*"
        initiators:  # Configure Initiators with all pre-req
          - nqn: connect-all
            listener_port: 4420
            node: node14
        load_balancing:
          - scale_down: ["node6", "node7"]  # scale down
          - scale_up: ["node6", "node7"]  # scale up
          - scale_up: ["node10", "node11"]  # scale up new nodes
      desc: 4GW 1GWgroup namespace load balancing
      destroy-cluster: false
      module: test_ceph_nvmeof_loadbalancing.py
      name: NVMeoF 4GW 1GWgroup namespaces load balancing
      polarion-id: CEPH-83598717

  # 1 GWgroup 8GW 4-subsystems scaledown from 8 - 2 nodes and scaleup from 2-8 nodes
  - test:
      abort-on-fail: false
      config:
        rbd_pool: rbd2
        gw_group: gw_group1
        do_not_create_image: true
        rep-pool-only: true
        rep_pool_config:
          pool: rbd2
        install: true  # Run SPDK with all pre-requisites
        cleanup:
          - pool
          - gateway
          - initiators
        gw_nodes:
          - node6
          - node7
          - node8
          - node9
          - node10
          - node11
          - node12
          - node13
        subsystems:  # Configure subsystems with all sub-entities
          - nqn: nqn.2016-06.io.spdk:cnode1
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9, node10, node11, node12, node13]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode2
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9, node10, node11, node12, node13]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode3
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9, node10, node11, node12, node13]
            allow_host: "*"
          - nqn: nqn.2016-06.io.spdk:cnode4
            serial: 1
            bdevs:
              - count: 2
                size: 5G
            listener_port: 4420
            listeners: [node6, node7, node8, node9, node10, node11, node12, node13]
            allow_host: "*"
        initiators:  # Configure Initiators with all pre-req
          - nqn: connect-all
            listener_port: 4420
            node: node14
        load_balancing:
          - scale_down: ["node6", "node7", "node8", "node9", "node10", "node11"]  # scale down
          - scale_up: ["node6", "node7", "node8", "node9", "node10", "node11"]  # scale up
      desc: 8GW 1GWgroup namespaces load balancing
      destroy-cluster: false
      module: test_ceph_nvmeof_loadbalancing.py
      name: NVMeoF 8-GW 1GWgroup namespaces load balancing
      polarion-id: CEPH-83598716
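
In the new load_balancing block, each list item names one scale operation and the gateway nodes it applies to; the steps run in order, and after each one the namespaces are expected to be redistributed across the gateways still in gw_group1. The sketch below is a minimal illustration of that driver loop under stated assumptions: the helper names and the even-spread check are hypothetical and are not the contents of test_ceph_nvmeof_loadbalancing.py.

# Minimal sketch of driving the load_balancing steps from the suite above.
# The scale handling and the namespace query are illustrative assumptions;
# the real logic lives in cephci's test module.
from typing import Callable, Dict, List


def apply_load_balancing_steps(
    steps: List[Dict[str, List[str]]],
    gateways: List[str],
    namespaces_per_gateway: Callable[[List[str]], Dict[str, int]],
    total_namespaces: int,
) -> None:
    """Apply each scale_down/scale_up step in order and check that the
    namespaces spread roughly evenly over the gateways left in the group."""
    for step in steps:
        for action, nodes in step.items():
            if action == "scale_down":
                gateways = [gw for gw in gateways if gw not in nodes]
            elif action == "scale_up":
                gateways = gateways + [gw for gw in nodes if gw not in gateways]
            else:
                raise ValueError(f"unknown load_balancing action: {action}")
            # After auto load balancing, no gateway should own far more
            # namespaces than an even split across the current gateways.
            expected = total_namespaces / len(gateways)
            counts = namespaces_per_gateway(gateways)
            assert all(abs(count - expected) <= 1 for count in counts.values())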