From eb0e8bdcd2d4c1c1bb720935552d599eb57b367a Mon Sep 17 00:00:00 2001 From: Niladri Halder Date: Tue, 30 Jul 2024 23:37:50 +0530 Subject: [PATCH] Add BDD spec (#198) * tests(quota): add namespace to the PVC delete calls Signed-off-by: Niladri Halder * tests(bdd): add bdd specs for existing tests Signed-off-by: Niladri Halder * tests(bdd): use Then directive to only check code behaviour Signed-off-by: Niladri Halder --------- Signed-off-by: Niladri Halder --- tests/bdd/ext4_quota.feature | 56 +++++++++++++++++++ tests/bdd/hostpath.feature | 30 ++++++++++ .../bdd/nodeAffinityLabels_cas_config.feature | 21 +++++++ tests/bdd/pvc_cas_config.feature | 50 +++++++++++++++++ tests/bdd/xfs_quota.feature | 54 ++++++++++++++++++ tests/hostpath_quota_test.go | 8 +-- 6 files changed, 215 insertions(+), 4 deletions(-) create mode 100644 tests/bdd/ext4_quota.feature create mode 100644 tests/bdd/hostpath.feature create mode 100644 tests/bdd/nodeAffinityLabels_cas_config.feature create mode 100644 tests/bdd/pvc_cas_config.feature create mode 100644 tests/bdd/xfs_quota.feature diff --git a/tests/bdd/ext4_quota.feature b/tests/bdd/ext4_quota.feature new file mode 100644 index 00000000..1f3436dd --- /dev/null +++ b/tests/bdd/ext4_quota.feature @@ -0,0 +1,56 @@ +Feature: Hostpath EXT4 Quota Local PV + + Scenario: HostPath EXT4 Quota Local PV with Unsupported Filesystem + Given a sparse file "disk.img" + And a loop device is created on top of disk.img + + When a StorageClass is created with the following attributes: + | name | sc-hp-ext4 | + | BasePath | /path/to/hostpath | + | EXT4QuotaEnabled | "true" | + | softLimit | "20%" | + | hardLimit | "50%" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And a minix filesystem is written into the loop device + And the minix filesystem is mounted with project quota enabled + And a PVC "pvc-hp-ext4" is created with the StorageClass "sc-hp-ext4" + And a Pod is created with 
PVC "pvc-hp-ext4" + Then the Pod should be in pending state + And the PVC should be in pending state + + When the Pod "busybox-hostpath" is deleted + Then the Pod should be deleted successfully + + When the PVC "pvc-hp-ext4" is deleted + Then the PVC should be deleted successfully + + Scenario: HostPath EXT4 Quota Local PV with EXT4 Filesystem + Given a sparse file "disk.img" + And a loop device is created on top of disk.img + + When a StorageClass with valid EXT4 quota parameters is created + Then it should create a StorageClass with the following attributes: + | name | sc-hp-ext4 | + | BasePath | /path/to/hostpath | + | EXT4QuotaEnabled | "true" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + + When the loop device is formatted with EXT4 filesystem + And the ext4 filesysten is mounted with project quota enabled + And a PVC "pvc-hp-ext4" is created with the StorageClass "sc-hp-ext4" + And a Pod is created with PVC "pvc-hp-ext4" + Then the Pod should be up and running + + When data is written more than the quota limit into the hostpath volume + Then the container process should not be able to write more than the enforced limit + + When the Pod consuming PVC "pvc-hp-ext4" is deleted + Then the Pod should be deleted successfully + + When the PVC "pvc-hp-ext4" is deleted + Then the PVC should be deleted successfully + And the Provisioner should delete the PV diff --git a/tests/bdd/hostpath.feature b/tests/bdd/hostpath.feature new file mode 100644 index 00000000..496c479e --- /dev/null +++ b/tests/bdd/hostpath.feature @@ -0,0 +1,30 @@ +Feature: TEST HOSTPATH LOCAL PV + + Scenario: Creating and Deleting StorageClass, PVC, and Deployment with Busybox + Given a hostpath provisioner is running + When a StorageClass is created with the following attributes: + | name | sc-hp | + | BasePath | /path/to/hostpath | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | 
reclaimPolicy | Delete | + And a PVC is created with the following attributes: + | name | pvc-hp | + | storageClass | sc-hp | + | accessModes | ReadWriteOnce | + | capacity | 2Gi | + And a deployment with a busybox image is created with the following attributes: + | name | busybox-hostpath | + | image | busybox | + | command | ["sleep", "3600"] | + | volumeMounts | name: demo-vol1, mountPath: /mnt/store1 | + | volumes | name: demo-vol1, pvcName: pvc-hp | + Then the Pod should be in Running state + And a bound PV should be created + + When the deployment is deleted + Then there should be no deployment or pod remaining + + When the PVC is deleted + Then the PVC should be deleted successfully + Then the PV should be deleted diff --git a/tests/bdd/nodeAffinityLabels_cas_config.feature b/tests/bdd/nodeAffinityLabels_cas_config.feature new file mode 100644 index 00000000..170e356d --- /dev/null +++ b/tests/bdd/nodeAffinityLabels_cas_config.feature @@ -0,0 +1,21 @@ +Feature: Volume Provisioning/De-provisioning with NodeAffinityLabels CAS-config on StorageClass + + Scenario: Volume provisioning/de-provisioning with custom NodeAffinityLabels CAS-config on StorageClass + When a StorageClass is created with the following attributes: + | name | sc-nod-aff-lab | + | BasePath | /path/to/hostpath | + | NodeAffinityLabels | "kubernetes.io/hostname", "kubernetes.io/os", "kubernetes.io/arch" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And a PVC "pvc-nod-aff-lab" is created with StorageClass "sc-nod-aff-lab" + And a deployment with a busybox image is created with PVC "pvc-nod-aff-lab" + Then a Pod should be up and running + And a bound PV should be created + And the SC NodeAffinityLabels CAS-config should be set correctly on the PV + + When the application Deployment is deleted + Then The Pod should be deleted + + When the PVC is deleted + Then the PV should be deleted diff --git 
a/tests/bdd/pvc_cas_config.feature b/tests/bdd/pvc_cas_config.feature new file mode 100644 index 00000000..b1c9a764 --- /dev/null +++ b/tests/bdd/pvc_cas_config.feature @@ -0,0 +1,50 @@ +Feature: Volume Provisioning/De-provisioning with Additive and Conflicting CAS-configs on PVC and SC + + Scenario: Additive CAS-configs on PVC and SC + When a StorageClass is created with the following attributes: + | name | sc-additive-cas-config | + | BasePath | /path/to/hostpath | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And a PVC "pvc-additive-cas-config" is created with the following attributes: + | name | pvc-additive-cas-config | + | storageClass | sc-additive-cas-config | + | NodeAffinityLabels | "kubernetes.io/os", "kubernetes.io/arch" | + | accessModes | ReadWriteOnce | + | capacity | 2Gi | + And a Deployment is created with PVC "pvc-additive-cas-config" + Then the Pod should be up and running + And a bound PV should be created + And the PVC NodeAffinityLabels CAS-configs should be set correctly on the PV + + When the application Deployment is deleted + Then The Pod should be deleted + + When the PVC is deleted + Then the PV should be deleted + + Scenario: Conflicting CAS-configs on PVC and SC + When a StorageClass is created with the following attributes: + | name | sc-conflicting-cas-config | + | BasePath | /path/to/hostpath | + | NodeAffinityLabels | "kubernetes.io/hostname" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And a PVC "pvc-conflicting-cas-config" is created with the following attributes: + | name | pvc-conflicting-cas-config | + | storageClass | sc-conflicting-cas-config | + | NodeAffinityLabels | "kubernetes.io/os", "kubernetes.io/arch" | + | accessModes | ReadWriteOnce | + | capacity | 2Gi | + And a Deployment is created with PVC "pvc-conflicting-cas-config" + Then a Pod should be up and running + And a bound PV should be created + And 
the SC NodeAffinityLabels CAS-config should be set correctly on the PV + + When the application Deployment is deleted + Then The Pod should be deleted + + When the PVC is deleted + Then the PV should be deleted diff --git a/tests/bdd/xfs_quota.feature b/tests/bdd/xfs_quota.feature new file mode 100644 index 00000000..d5109357 --- /dev/null +++ b/tests/bdd/xfs_quota.feature @@ -0,0 +1,54 @@ +Feature: Hostpath XFS Quota Local PV + + Scenario: HostPath XFS Quota Local PV with Unsupported Filesystem + Given a sparse file "disk.img" + And a loop device is created on top of disk.img + + When a StorageClass is created with the following attributes: + | name | sc-hp-xfs | + | BasePath | /path/to/hostpath | + | XFSQuotaEnabled | "true" | + | softLimit | "20%" | + | hardLimit | "50%" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And a minix filesystem is written into the loop device + And the minix filesystem is mounted with project quota enabled + And a PVC "pvc-hp-xfs" is created with the StorageClass "sc-hp-xfs" + And a Pod is created with PVC "pvc-hp-xfs" + Then the Pod should be in pending state + And the PVC should be in pending state + + When the Pod "busybox-hostpath" is deleted + Then the Pod should be deleted successfully + + When the PVC "pvc-hp-xfs" is deleted + Then the PVC should be deleted successfully + + Scenario: HostPath XFS Quota Local PV with XFS Filesystem + Given a sparse file "disk.img" + And a loop device is created on top of disk.img + + When a StorageClass is created with the following attributes: + | name | sc-hp-xfs | + | BasePath | /path/to/hostpath | + | XFSQuotaEnabled | "true" | + | provisionerName | openebs.io/local | + | volumeBindingMode | WaitForFirstConsumer | + | reclaimPolicy | Delete | + And the loop device is formatted with XFS filesystem + And the xfs filesystem is mounted with project quota enabled + And a PVC "pvc-hp-xfs" is created with the StorageClass 
"sc-hp-xfs" + And a Pod is created with PVC "pvc-hp-xfs" + Then the Pod should be up and running + + When data is written more than the quota limit into the hostpath volume + Then the container process should not be able to write more than the enforced limit + + When the Pod consuming PVC "pvc-hp-xfs" is deleted + Then the Pod should be deleted successfully + + When the PVC "pvc-hp-xfs" is deleted + Then the PVC should be deleted successfully + And the Provisioner should delete the PV diff --git a/tests/hostpath_quota_test.go b/tests/hostpath_quota_test.go index c4b525c1..7f7c1512 100644 --- a/tests/hostpath_quota_test.go +++ b/tests/hostpath_quota_test.go @@ -164,7 +164,7 @@ var _ = Describe("TEST HOSTPATH XFS QUOTA LOCAL PV WITH UNSUPPORTED FILESYSTEM", ) By("deleting above PVC") - err = ops.PVCClient.Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) + err = ops.PVCClient.WithNamespace(namespaceObj.Name).Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting pvc {%s} in namespace {%s}", @@ -357,7 +357,7 @@ var _ = Describe("TEST HOSTPATH XFS QUOTA LOCAL PV WITH XFS FILESYSTEM", func() ) By("deleting above PVC") - err = ops.PVCClient.Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) + err = ops.PVCClient.WithNamespace(namespaceObj.Name).Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting pvc {%s} in namespace {%s}", @@ -543,7 +543,7 @@ var _ = Describe("TEST HOSTPATH EXT4 QUOTA LOCAL PV WITH UNSUPPORTED FILESYSTEM" ) By("deleting above PVC") - err = ops.PVCClient.Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) + err = ops.PVCClient.WithNamespace(namespaceObj.Name).Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting pvc {%s} in namespace {%s}", @@ -736,7 +736,7 @@ var _ = Describe("TEST HOSTPATH EXT4 QUOTA LOCAL PV WITH EXT4 FILESYSTEM", func( ) By("deleting above PVC") - err = 
ops.PVCClient.Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) + err = ops.PVCClient.WithNamespace(namespaceObj.Name).Delete(context.TODO(), pvcName, &metav1.DeleteOptions{}) Expect(err).To( BeNil(), "while deleting pvc {%s} in namespace {%s}",