diff --git a/testsuite/kuadrant/policy/dns.py b/testsuite/kuadrant/policy/dns.py
index eddeff38..3a4b86d3 100644
--- a/testsuite/kuadrant/policy/dns.py
+++ b/testsuite/kuadrant/policy/dns.py
@@ -5,7 +5,20 @@
 from testsuite.gateway import Referencable
 from testsuite.kubernetes.client import KubernetesClient
 from testsuite.kuadrant.policy import Policy
-from testsuite.utils import asdict
+from testsuite.utils import asdict, check_condition
+
+
+def has_record_condition(condition_type, status="True", reason=None, message=None):
+    """Returns function, that returns True if the DNSPolicy has specific record condition"""
+
+    def _check(obj):
+        for record in obj.model.status.recordConditions.values():
+            for condition in record:
+                if check_condition(condition, condition_type, status, reason, message):
+                    return True
+        return False
+
+    return _check
 
 
 @dataclass
diff --git a/testsuite/kubernetes/secret.py b/testsuite/kubernetes/secret.py
index db16ba5b..e49bd6cd 100644
--- a/testsuite/kubernetes/secret.py
+++ b/testsuite/kubernetes/secret.py
@@ -16,7 +16,7 @@ def create_instance(
         cluster,
         name,
         data: dict[str, str],
-        secret_type: Literal["kubernetes.io/tls", "Opaque"] = "Opaque",
+        secret_type: Literal["kubernetes.io/tls", "kuadrant.io/aws", "Opaque"] = "Opaque",
         labels: dict[str, str] = None,
     ):
         """Creates new Secret"""
@@ -54,7 +54,7 @@ def create_instance(  # type: ignore[override]
         certificate: Certificate,
         cert_name: str = "tls.crt",
         key_name: str = "tls.key",
-        secret_type: Literal["kubernetes.io/tls", "Opaque"] = "kubernetes.io/tls",
+        secret_type: Literal["kubernetes.io/tls", "kuadrant.io/aws", "Opaque"] = "kubernetes.io/tls",
         labels: dict[str, str] = None,
     ):
         return super().create_instance(
diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/__init__.py b/testsuite/tests/singlecluster/gateway/dnspolicy/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/test_invalid_credentials.py b/testsuite/tests/singlecluster/gateway/dnspolicy/test_invalid_credentials.py
new file mode 100644
index 00000000..146cccb9
--- /dev/null
+++ b/testsuite/tests/singlecluster/gateway/dnspolicy/test_invalid_credentials.py
@@ -0,0 +1,51 @@
+"""Test DNSPolicy behavior when invalid credentials are provided"""
+
+import pytest
+
+from testsuite.kubernetes.secret import Secret
+from testsuite.kuadrant.policy import has_condition
+from testsuite.kuadrant.policy.dns import has_record_condition
+from testsuite.gateway.gateway_api.gateway import KuadrantGateway
+
+pytestmark = [pytest.mark.kuadrant_only, pytest.mark.dnspolicy]
+
+
+@pytest.fixture(scope="module")
+def gateway(request, cluster, blame, wildcard_domain, module_label):
+    """Create gateway without TLS enabled"""
+    gw = KuadrantGateway.create_instance(cluster, blame("gw"), wildcard_domain, {"app": module_label}, tls=False)
+    request.addfinalizer(gw.delete)
+    gw.commit()
+    gw.wait_for_ready()
+    return gw
+
+
+@pytest.fixture(scope="module")
+def dns_provider_secret(request, cluster, module_label, blame):
+    """Create AWS provider secret with invalid credentials"""
+    creds = {
+        "AWS_ACCESS_KEY_ID": "ABCDEFGHIJKL",
+        "AWS_SECRET_ACCESS_KEY": "abcdefg12345+",
+    }
+
+    secret = Secret.create_instance(cluster, blame("creds"), creds, "kuadrant.io/aws", labels={"app": module_label})
+    request.addfinalizer(secret.delete)
+    secret.commit()
+    return secret.name()
+
+
+@pytest.fixture(scope="module", autouse=True)
+def commit(request, route, dns_policy):  # pylint: disable=unused-argument
+    """Commits dnspolicy without waiting for it to be ready"""
+    request.addfinalizer(dns_policy.delete)
+    dns_policy.commit()
+
+
+def test_invalid_credentials(dns_policy):
+    """Verify that DNSPolicy is not ready or enforced when invalid credentials are provided"""
+    assert dns_policy.wait_until(
+        has_condition("Enforced", "False")
+    ), f"DNSPolicy did not reach expected status, instead it was: {dns_policy.model.status.conditions}"
+    assert dns_policy.wait_until(
+        has_record_condition("Ready", "False", "DNSProviderError", "InvalidClientTokenId")
+    ), f"DNSPolicy did not reach expected record status, instead it was: {dns_policy.model.status.recordConditions}"