From 54d3e49c4338238489bc7a35a0bba959ebf7a53a Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Tue, 25 Jul 2023 16:44:44 -0400 Subject: [PATCH] (repo): transplant ansible related logic from operator sdk (#2) Signed-off-by: Bryce Palmer --- .github/ISSUE_TEMPLATE.md | 5 + .github/ISSUE_TEMPLATE/bug-report.md | 63 + .github/ISSUE_TEMPLATE/doc-report.md | 28 + .github/ISSUE_TEMPLATE/feature-request.md | 28 + .github/ISSUE_TEMPLATE/support-question.md | 69 ++ .github/PULL_REQUEST_TEMPLATE.md | 25 + .github/workflows/test-ansible.yml | 38 + .github/workflows/test-sanity.yml | 18 + .github/workflows/unit.yml | 19 + .gitignore | 132 ++ Makefile | 179 +++ cmd/ansible-operator/main.go | 43 + go.mod | 131 ++ go.sum | 1104 +++++++++++++++++ hack/check-error-log-msg-format.sh | 25 + hack/check-license.sh | 20 + hack/generate/samples/generate_testdata.go | 57 + .../internal/ansible/advanced_molecule.go | 600 +++++++++ .../samples/internal/ansible/constants.go | 588 +++++++++ .../samples/internal/ansible/generate.go | 23 + .../samples/internal/ansible/memcached.go | 136 ++ .../internal/ansible/memcached_molecule.go | 210 ++++ .../internal/ansible/testdata/ansible.cfg | 10 + .../testdata/fixture_collection/galaxy.yml | 6 + .../roles/dummy/tasks/main.yml | 12 + .../testdata/inventory/group_vars/test.yml | 3 + .../internal/ansible/testdata/inventory/hosts | 5 + .../finalizerconcurrencyfinalizer.yml | 34 + .../ansible/testdata/plugins/filter/test.py | 17 + .../internal/ansible/testdata/secret.yml | 22 + .../ansible/testdata/tasks/argstest_test.yml | 25 + .../ansible/testdata/tasks/casetest_test.yml | 24 + .../tasks/clusterannotationtest_test.yml | 51 + .../testdata/tasks/collectiontest_test.yml | 26 + .../tasks/finalizerconcurrencytest_test.yml | 58 + .../testdata/tasks/inventorytest_test.yml | 22 + .../tasks/reconciliationtest_test.yml | 32 + .../testdata/tasks/secretstest_test.yml | 56 + .../testdata/tasks/selectortest_test.yml | 51 + .../testdata/tasks/subresourcestest_test.yml 
| 26 + .../internal/ansible/testdata/watches.yaml | 84 ++ hack/generate/samples/internal/pkg/context.go | 30 + hack/generate/samples/internal/pkg/utils.go | 89 ++ hack/generate/samples/molecule/generate.go | 78 ++ hack/lib/common.sh | 66 + hack/tests/e2e-ansible-molecule.sh | 70 ++ images/ansible-operator/Dockerfile | 37 + images/ansible-operator/Pipfile | 16 + images/ansible-operator/Pipfile.lock | 549 ++++++++ images/ansible-operator/base.Dockerfile | 60 + internal/annotations/metrics/metrics.go | 85 ++ .../annotations/metrics/metrics_suite_test.go | 27 + internal/annotations/metrics/metrics_test.go | 45 + internal/annotations/scorecard/scorecard.go | 54 + internal/ansible/apiserver/apiserver.go | 77 ++ internal/ansible/controller/controller.go | 149 +++ .../ansible/controller/controller_test.go | 39 + internal/ansible/controller/reconcile.go | 457 +++++++ internal/ansible/controller/reconcile_test.go | 598 +++++++++ internal/ansible/controller/status/types.go | 200 +++ internal/ansible/controller/status/utils.go | 96 ++ .../ansible/controller/status/utils_test.go | 302 +++++ internal/ansible/events/log_events.go | 189 +++ internal/ansible/flags/flag.go | 243 ++++ internal/ansible/flags/flag_test.go | 72 ++ internal/ansible/flags/suite_test.go | 27 + .../ansible/handler/handler_suite_test.go | 36 + .../handler/logging_enqueue_annotation.go | 113 ++ .../logging_enqueue_annotation_test.go | 442 +++++++ .../ansible/handler/logging_enqueue_object.go | 69 ++ .../handler/logging_enqueue_object_test.go | 218 ++++ .../ansible/handler/logging_enqueue_owner.go | 97 ++ .../handler/logging_enqueue_owner_test.go | 269 ++++ internal/ansible/metrics/metrics.go | 263 ++++ internal/ansible/paramconv/paramconv.go | 209 ++++ internal/ansible/paramconv/paramconv_test.go | 242 ++++ internal/ansible/proxy/cache_response.go | 305 +++++ .../proxy/controllermap/controllermap.go | 111 ++ internal/ansible/proxy/inject_owner.go | 188 +++ internal/ansible/proxy/inject_owner_test.go | 125 ++ 
.../ansible/proxy/kubeconfig/kubeconfig.go | 126 ++ internal/ansible/proxy/kubectl.go | 278 +++++ internal/ansible/proxy/proxy.go | 393 ++++++ internal/ansible/proxy/proxy_suite_test.go | 128 ++ internal/ansible/proxy/proxy_test.go | 77 ++ .../proxy/requestfactory/requestinfo.go | 278 +++++ internal/ansible/runner/eventapi/eventapi.go | 177 +++ internal/ansible/runner/eventapi/types.go | 138 +++ internal/ansible/runner/fake/runner.go | 96 ++ .../runner/internal/inputdir/inputdir.go | 208 ++++ internal/ansible/runner/runner.go | 455 +++++++ internal/ansible/runner/runner_test.go | 373 ++++++ internal/ansible/runner/testdata/playbook.yml | 0 .../runner/testdata/roles/role/tasks.yaml | 0 .../collection/roles/someRole/empty_file | 0 .../watches/testdata/duplicate_gvk.yaml | 17 + .../ansible/watches/testdata/invalid.yaml | 9 + .../watches/testdata/invalid_collection.yaml | 5 + .../watches/testdata/invalid_duration.yaml | 6 + .../testdata/invalid_finalizer_no_vars.yaml | 7 + .../invalid_finalizer_playbook_path.yaml | 10 + .../testdata/invalid_finalizer_role_path.yaml | 10 + .../invalid_finalizer_whithout_name.yaml | 8 + .../testdata/invalid_playbook_path.yaml | 9 + .../watches/testdata/invalid_role_path.yaml | 9 + .../watches/testdata/invalid_status.yaml | 6 + .../watches/testdata/invalid_yaml_file.yaml | 3 + .../ansible/watches/testdata/playbook.yml | 0 .../watches/testdata/roles/role/tasks.yaml | 0 .../ansible/watches/testdata/valid.yaml.tmpl | 124 ++ internal/ansible/watches/watches.go | 484 ++++++++ internal/ansible/watches/watches_test.go | 894 +++++++++++++ internal/cmd/ansible-operator/run/cmd.go | 365 ++++++ .../ansible-operator/run/proxy_suite_test.go | 27 + internal/cmd/ansible-operator/version/cmd.go | 44 + .../cmd/ansible-operator/version/cmd_test.go | 65 + .../version/version_suite_test.go | 27 + internal/flags/flags.go | 20 + internal/plugins/ansible/v1/api.go | 167 +++ .../plugins/ansible/v1/constants/constants.go | 30 + internal/plugins/ansible/v1/init.go 
| 267 ++++ internal/plugins/ansible/v1/plugin.go | 48 + internal/plugins/ansible/v1/scaffolds/api.go | 108 ++ internal/plugins/ansible/v1/scaffolds/init.go | 103 ++ .../internal/templates/config/crd/crd.go | 107 ++ .../templates/config/crd/kustomization.go | 80 ++ .../internal/templates/config/rbac/role.go | 155 +++ .../config/testing/debug_logs_patch.go | 58 + .../templates/config/testing/kustomization.go | 67 + .../templates/config/testing/manager_image.go | 56 + .../testing/pullpolicy/always_pull_patch.go | 56 + .../pullpolicy/ifnotpresent_pull_patch.go | 56 + .../testing/pullpolicy/never_pull_patch.go | 56 + .../internal/templates/dockerfile.go | 66 + .../scaffolds/internal/templates/gitignore.go | 56 + .../scaffolds/internal/templates/makefile.go | 176 +++ .../templates/molecule/mdefault/converge.go | 57 + .../templates/molecule/mdefault/create.go | 45 + .../templates/molecule/mdefault/destroy.go | 63 + .../templates/molecule/mdefault/kustomize.go | 61 + .../templates/molecule/mdefault/molecule.go | 66 + .../templates/molecule/mdefault/prepare.go | 67 + .../molecule/mdefault/tasks_test_resource.go | 62 + .../templates/molecule/mdefault/verify.go | 96 ++ .../templates/molecule/mkind/converge.go | 63 + .../templates/molecule/mkind/create.go | 47 + .../templates/molecule/mkind/destroy.go | 55 + .../templates/molecule/mkind/molecule.go | 72 ++ .../templates/playbooks/placeholder.go | 38 + .../internal/templates/playbooks/playbook.go | 57 + .../internal/templates/requirements.go | 46 + .../internal/templates/roles/defaults_main.go | 44 + .../internal/templates/roles/files_dir.go | 43 + .../internal/templates/roles/handlers_main.go | 45 + .../internal/templates/roles/meta_main.go | 107 ++ .../internal/templates/roles/placeholder.go | 38 + .../internal/templates/roles/readme.go | 87 ++ .../internal/templates/roles/tasks_main.go | 45 + .../internal/templates/roles/templates_dir.go | 43 + .../internal/templates/roles/vars_main.go | 45 + 
.../scaffolds/internal/templates/watches.go | 115 ++ internal/plugins/plugins.go | 19 + internal/plugins/util/cleanup.go | 208 ++++ internal/plugins/util/legacy_keys.go | 93 ++ internal/plugins/util/message.go | 20 + internal/testutils/olm.go | 97 ++ internal/testutils/scorecard.go | 103 ++ internal/testutils/utils.go | 222 ++++ internal/util/bundleutil/bundleutil.go | 281 +++++ internal/util/bundleutil/template.go | 79 ++ internal/util/k8sutil/api.go | 205 +++ internal/util/k8sutil/api_test.go | 74 ++ internal/util/k8sutil/constants.go | 26 + internal/util/k8sutil/k8sutil.go | 166 +++ internal/util/k8sutil/k8sutil_test.go | 385 ++++++ internal/util/k8sutil/object.go | 55 + internal/util/k8sutil/scan.go | 86 ++ .../util/projutil/interactive_promt_util.go | 113 ++ .../projutil/interactive_promt_util_test.go | 103 ++ internal/util/projutil/project_util.go | 170 +++ internal/util/projutil/projutil_test.go | 98 ++ internal/version/version.go | 30 + test/common/sa_secret.go | 42 + test/common/scorecard.go | 122 ++ test/e2e/ansible/cluster_test.go | 394 ++++++ test/e2e/ansible/local_test.go | 50 + test/e2e/ansible/olm_test.go | 58 + test/e2e/ansible/scorecard_test.go | 23 + test/e2e/ansible/suite_test.go | 263 ++++ .../ansible/memcached-operator/.gitignore | 14 + .../ansible/memcached-operator/Dockerfile | 9 + testdata/ansible/memcached-operator/Makefile | 231 ++++ testdata/ansible/memcached-operator/PROJECT | 20 + .../memcached-operator/bundle.Dockerfile | 17 + .../cache.example.com_memcacheds.yaml | 50 + ...nitoring.coreos.com_v1_servicemonitor.yaml | 23 + ...er-manager-metrics-service_v1_service.yaml | 23 + ...c.authorization.k8s.io_v1_clusterrole.yaml | 17 + ...cached-operator.clusterserviceversion.yaml | 261 ++++ .../bundle/metadata/annotations.yaml | 11 + .../bundle/tests/scorecard/config.yaml | 70 ++ .../bases/cache.example.com_memcacheds.yaml | 44 + .../config/crd/kustomization.yaml | 6 + .../config/default/kustomization.yaml | 30 + 
.../default/manager_auth_proxy_patch.yaml | 56 + .../config/default/manager_config_patch.yaml | 10 + .../config/manager/kustomization.yaml | 8 + .../config/manager/manager.yaml | 99 ++ ...cached-operator.clusterserviceversion.yaml | 42 + .../config/manifests/kustomization.yaml | 7 + .../config/prometheus/kustomization.yaml | 2 + .../config/prometheus/monitor.yaml | 26 + .../rbac/auth_proxy_client_clusterrole.yaml | 16 + .../config/rbac/auth_proxy_role.yaml | 24 + .../config/rbac/auth_proxy_role_binding.yaml | 19 + .../config/rbac/auth_proxy_service.yaml | 21 + .../config/rbac/kustomization.yaml | 18 + .../config/rbac/leader_election_role.yaml | 44 + .../rbac/leader_election_role_binding.yaml | 19 + .../config/rbac/memcached_editor_role.yaml | 31 + .../config/rbac/memcached_viewer_role.yaml | 27 + .../memcached-operator/config/rbac/role.yaml | 57 + .../config/rbac/role_binding.yaml | 19 + .../config/rbac/service_account.yaml | 12 + .../samples/cache_v1alpha1_memcached.yaml | 12 + .../config/samples/kustomization.yaml | 4 + .../config/scorecard/bases/config.yaml | 7 + .../config/scorecard/kustomization.yaml | 16 + .../scorecard/patches/basic.config.yaml | 10 + .../config/scorecard/patches/olm.config.yaml | 50 + .../config/testing/debug_logs_patch.yaml | 14 + .../config/testing/kustomization.yaml | 23 + .../config/testing/manager_image.yaml | 12 + .../config/testing/pull_policy/Always.yaml | 12 + .../testing/pull_policy/IfNotPresent.yaml | 12 + .../config/testing/pull_policy/Never.yaml | 12 + .../molecule/default/converge.yml | 18 + .../molecule/default/create.yml | 6 + .../molecule/default/destroy.yml | 24 + .../molecule/default/kustomize.yml | 22 + .../molecule/default/molecule.yml | 36 + .../molecule/default/prepare.yml | 28 + .../molecule/default/tasks/memcached_test.yml | 129 ++ .../molecule/default/verify.yml | 57 + .../molecule/kind/converge.yml | 24 + .../molecule/kind/create.yml | 8 + .../molecule/kind/destroy.yml | 16 + .../molecule/kind/molecule.yml | 33 + 
.../memcached-operator/playbooks/.placeholder | 0 .../playbooks/memcached.yml | 9 + .../memcached-operator/requirements.yml | 10 + .../memcached-operator/roles/.placeholder | 0 .../roles/memcached/README.md | 43 + .../roles/memcached/defaults/main.yml | 2 + .../roles/memcached/files/.placeholder | 0 .../roles/memcached/handlers/main.yml | 2 + .../roles/memcached/meta/main.yml | 64 + .../roles/memcached/tasks/main.yml | 180 +++ .../roles/memcached/templates/.placeholder | 0 .../roles/memcached/vars/main.yml | 2 + .../ansible/memcached-operator/watches.yaml | 7 + tools/scripts/fetch | 52 + tools/tools.go | 7 + 263 files changed, 24473 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/doc-report.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/support-question.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/workflows/test-ansible.yml create mode 100644 .github/workflows/test-sanity.yml create mode 100644 .github/workflows/unit.yml create mode 100644 Makefile create mode 100644 cmd/ansible-operator/main.go create mode 100644 go.mod create mode 100644 go.sum create mode 100755 hack/check-error-log-msg-format.sh create mode 100755 hack/check-license.sh create mode 100644 hack/generate/samples/generate_testdata.go create mode 100644 hack/generate/samples/internal/ansible/advanced_molecule.go create mode 100644 hack/generate/samples/internal/ansible/constants.go create mode 100644 hack/generate/samples/internal/ansible/generate.go create mode 100644 hack/generate/samples/internal/ansible/memcached.go create mode 100644 hack/generate/samples/internal/ansible/memcached_molecule.go create mode 100644 hack/generate/samples/internal/ansible/testdata/ansible.cfg create mode 100644 hack/generate/samples/internal/ansible/testdata/fixture_collection/galaxy.yml create mode 100644 
hack/generate/samples/internal/ansible/testdata/fixture_collection/roles/dummy/tasks/main.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/inventory/group_vars/test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/inventory/hosts create mode 100644 hack/generate/samples/internal/ansible/testdata/playbooks/finalizerconcurrencyfinalizer.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/plugins/filter/test.py create mode 100644 hack/generate/samples/internal/ansible/testdata/secret.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/argstest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/casetest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/clusterannotationtest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/collectiontest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/finalizerconcurrencytest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/inventorytest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/reconciliationtest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/secretstest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/selectortest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/tasks/subresourcestest_test.yml create mode 100644 hack/generate/samples/internal/ansible/testdata/watches.yaml create mode 100644 hack/generate/samples/internal/pkg/context.go create mode 100644 hack/generate/samples/internal/pkg/utils.go create mode 100644 hack/generate/samples/molecule/generate.go create mode 100644 hack/lib/common.sh create mode 100755 hack/tests/e2e-ansible-molecule.sh create mode 100644 images/ansible-operator/Dockerfile create mode 100644 images/ansible-operator/Pipfile 
create mode 100644 images/ansible-operator/Pipfile.lock create mode 100644 images/ansible-operator/base.Dockerfile create mode 100644 internal/annotations/metrics/metrics.go create mode 100644 internal/annotations/metrics/metrics_suite_test.go create mode 100644 internal/annotations/metrics/metrics_test.go create mode 100644 internal/annotations/scorecard/scorecard.go create mode 100644 internal/ansible/apiserver/apiserver.go create mode 100644 internal/ansible/controller/controller.go create mode 100644 internal/ansible/controller/controller_test.go create mode 100644 internal/ansible/controller/reconcile.go create mode 100644 internal/ansible/controller/reconcile_test.go create mode 100644 internal/ansible/controller/status/types.go create mode 100644 internal/ansible/controller/status/utils.go create mode 100644 internal/ansible/controller/status/utils_test.go create mode 100644 internal/ansible/events/log_events.go create mode 100644 internal/ansible/flags/flag.go create mode 100644 internal/ansible/flags/flag_test.go create mode 100644 internal/ansible/flags/suite_test.go create mode 100644 internal/ansible/handler/handler_suite_test.go create mode 100644 internal/ansible/handler/logging_enqueue_annotation.go create mode 100644 internal/ansible/handler/logging_enqueue_annotation_test.go create mode 100644 internal/ansible/handler/logging_enqueue_object.go create mode 100644 internal/ansible/handler/logging_enqueue_object_test.go create mode 100644 internal/ansible/handler/logging_enqueue_owner.go create mode 100644 internal/ansible/handler/logging_enqueue_owner_test.go create mode 100644 internal/ansible/metrics/metrics.go create mode 100644 internal/ansible/paramconv/paramconv.go create mode 100644 internal/ansible/paramconv/paramconv_test.go create mode 100644 internal/ansible/proxy/cache_response.go create mode 100644 internal/ansible/proxy/controllermap/controllermap.go create mode 100644 internal/ansible/proxy/inject_owner.go create mode 100644 
internal/ansible/proxy/inject_owner_test.go create mode 100644 internal/ansible/proxy/kubeconfig/kubeconfig.go create mode 100644 internal/ansible/proxy/kubectl.go create mode 100644 internal/ansible/proxy/proxy.go create mode 100644 internal/ansible/proxy/proxy_suite_test.go create mode 100644 internal/ansible/proxy/proxy_test.go create mode 100644 internal/ansible/proxy/requestfactory/requestinfo.go create mode 100644 internal/ansible/runner/eventapi/eventapi.go create mode 100644 internal/ansible/runner/eventapi/types.go create mode 100644 internal/ansible/runner/fake/runner.go create mode 100644 internal/ansible/runner/internal/inputdir/inputdir.go create mode 100644 internal/ansible/runner/runner.go create mode 100644 internal/ansible/runner/runner_test.go create mode 100644 internal/ansible/runner/testdata/playbook.yml create mode 100644 internal/ansible/runner/testdata/roles/role/tasks.yaml create mode 100644 internal/ansible/watches/testdata/ansible_collections/nameSpace/collection/roles/someRole/empty_file create mode 100644 internal/ansible/watches/testdata/duplicate_gvk.yaml create mode 100644 internal/ansible/watches/testdata/invalid.yaml create mode 100644 internal/ansible/watches/testdata/invalid_collection.yaml create mode 100644 internal/ansible/watches/testdata/invalid_duration.yaml create mode 100644 internal/ansible/watches/testdata/invalid_finalizer_no_vars.yaml create mode 100644 internal/ansible/watches/testdata/invalid_finalizer_playbook_path.yaml create mode 100644 internal/ansible/watches/testdata/invalid_finalizer_role_path.yaml create mode 100644 internal/ansible/watches/testdata/invalid_finalizer_whithout_name.yaml create mode 100644 internal/ansible/watches/testdata/invalid_playbook_path.yaml create mode 100644 internal/ansible/watches/testdata/invalid_role_path.yaml create mode 100644 internal/ansible/watches/testdata/invalid_status.yaml create mode 100644 internal/ansible/watches/testdata/invalid_yaml_file.yaml create mode 100644 
internal/ansible/watches/testdata/playbook.yml create mode 100644 internal/ansible/watches/testdata/roles/role/tasks.yaml create mode 100644 internal/ansible/watches/testdata/valid.yaml.tmpl create mode 100644 internal/ansible/watches/watches.go create mode 100644 internal/ansible/watches/watches_test.go create mode 100644 internal/cmd/ansible-operator/run/cmd.go create mode 100644 internal/cmd/ansible-operator/run/proxy_suite_test.go create mode 100644 internal/cmd/ansible-operator/version/cmd.go create mode 100644 internal/cmd/ansible-operator/version/cmd_test.go create mode 100644 internal/cmd/ansible-operator/version/version_suite_test.go create mode 100644 internal/flags/flags.go create mode 100644 internal/plugins/ansible/v1/api.go create mode 100644 internal/plugins/ansible/v1/constants/constants.go create mode 100644 internal/plugins/ansible/v1/init.go create mode 100644 internal/plugins/ansible/v1/plugin.go create mode 100644 internal/plugins/ansible/v1/scaffolds/api.go create mode 100644 internal/plugins/ansible/v1/scaffolds/init.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/crd.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/kustomization.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac/role.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/debug_logs_patch.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/kustomization.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/manager_image.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/always_pull_patch.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/ifnotpresent_pull_patch.go create mode 100644 
internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/never_pull_patch.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/dockerfile.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/gitignore.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/makefile.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/converge.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/create.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/destroy.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/kustomize.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/molecule.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/prepare.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/tasks_test_resource.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/verify.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/converge.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/create.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/destroy.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/molecule.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/placeholder.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/playbook.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/requirements.go create mode 100644 
internal/plugins/ansible/v1/scaffolds/internal/templates/roles/defaults_main.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/files_dir.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/handlers_main.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/meta_main.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/placeholder.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/readme.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/tasks_main.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/templates_dir.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/roles/vars_main.go create mode 100644 internal/plugins/ansible/v1/scaffolds/internal/templates/watches.go create mode 100644 internal/plugins/plugins.go create mode 100644 internal/plugins/util/cleanup.go create mode 100644 internal/plugins/util/legacy_keys.go create mode 100644 internal/plugins/util/message.go create mode 100644 internal/testutils/olm.go create mode 100644 internal/testutils/scorecard.go create mode 100644 internal/testutils/utils.go create mode 100644 internal/util/bundleutil/bundleutil.go create mode 100644 internal/util/bundleutil/template.go create mode 100644 internal/util/k8sutil/api.go create mode 100644 internal/util/k8sutil/api_test.go create mode 100644 internal/util/k8sutil/constants.go create mode 100644 internal/util/k8sutil/k8sutil.go create mode 100644 internal/util/k8sutil/k8sutil_test.go create mode 100644 internal/util/k8sutil/object.go create mode 100644 internal/util/k8sutil/scan.go create mode 100644 internal/util/projutil/interactive_promt_util.go create mode 100644 internal/util/projutil/interactive_promt_util_test.go create mode 100644 internal/util/projutil/project_util.go create mode 100644 
internal/util/projutil/projutil_test.go create mode 100644 internal/version/version.go create mode 100644 test/common/sa_secret.go create mode 100644 test/common/scorecard.go create mode 100644 test/e2e/ansible/cluster_test.go create mode 100644 test/e2e/ansible/local_test.go create mode 100644 test/e2e/ansible/olm_test.go create mode 100644 test/e2e/ansible/scorecard_test.go create mode 100644 test/e2e/ansible/suite_test.go create mode 100644 testdata/ansible/memcached-operator/.gitignore create mode 100644 testdata/ansible/memcached-operator/Dockerfile create mode 100644 testdata/ansible/memcached-operator/Makefile create mode 100644 testdata/ansible/memcached-operator/PROJECT create mode 100644 testdata/ansible/memcached-operator/bundle.Dockerfile create mode 100644 testdata/ansible/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-service_v1_service.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/manifests/memcached-operator.clusterserviceversion.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/metadata/annotations.yaml create mode 100644 testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml create mode 100644 testdata/ansible/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml create mode 100644 testdata/ansible/memcached-operator/config/crd/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/default/kustomization.yaml create mode 100644 
testdata/ansible/memcached-operator/config/default/manager_auth_proxy_patch.yaml create mode 100644 testdata/ansible/memcached-operator/config/default/manager_config_patch.yaml create mode 100644 testdata/ansible/memcached-operator/config/manager/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/manager/manager.yaml create mode 100644 testdata/ansible/memcached-operator/config/manifests/bases/memcached-operator.clusterserviceversion.yaml create mode 100644 testdata/ansible/memcached-operator/config/manifests/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/prometheus/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/prometheus/monitor.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/auth_proxy_client_clusterrole.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/auth_proxy_role.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/auth_proxy_role_binding.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/auth_proxy_service.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/leader_election_role.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/leader_election_role_binding.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/memcached_editor_role.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/memcached_viewer_role.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/role.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/role_binding.yaml create mode 100644 testdata/ansible/memcached-operator/config/rbac/service_account.yaml create mode 100644 testdata/ansible/memcached-operator/config/samples/cache_v1alpha1_memcached.yaml create mode 100644 
testdata/ansible/memcached-operator/config/samples/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/scorecard/bases/config.yaml create mode 100644 testdata/ansible/memcached-operator/config/scorecard/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml create mode 100644 testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/debug_logs_patch.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/kustomization.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/manager_image.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/pull_policy/Always.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/pull_policy/IfNotPresent.yaml create mode 100644 testdata/ansible/memcached-operator/config/testing/pull_policy/Never.yaml create mode 100644 testdata/ansible/memcached-operator/molecule/default/converge.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/create.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/destroy.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/kustomize.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/molecule.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/prepare.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/tasks/memcached_test.yml create mode 100644 testdata/ansible/memcached-operator/molecule/default/verify.yml create mode 100644 testdata/ansible/memcached-operator/molecule/kind/converge.yml create mode 100644 testdata/ansible/memcached-operator/molecule/kind/create.yml create mode 100644 testdata/ansible/memcached-operator/molecule/kind/destroy.yml create mode 100644 
testdata/ansible/memcached-operator/molecule/kind/molecule.yml create mode 100644 testdata/ansible/memcached-operator/playbooks/.placeholder create mode 100644 testdata/ansible/memcached-operator/playbooks/memcached.yml create mode 100644 testdata/ansible/memcached-operator/requirements.yml create mode 100644 testdata/ansible/memcached-operator/roles/.placeholder create mode 100644 testdata/ansible/memcached-operator/roles/memcached/README.md create mode 100644 testdata/ansible/memcached-operator/roles/memcached/defaults/main.yml create mode 100644 testdata/ansible/memcached-operator/roles/memcached/files/.placeholder create mode 100644 testdata/ansible/memcached-operator/roles/memcached/handlers/main.yml create mode 100644 testdata/ansible/memcached-operator/roles/memcached/meta/main.yml create mode 100644 testdata/ansible/memcached-operator/roles/memcached/tasks/main.yml create mode 100644 testdata/ansible/memcached-operator/roles/memcached/templates/.placeholder create mode 100644 testdata/ansible/memcached-operator/roles/memcached/vars/main.yml create mode 100644 testdata/ansible/memcached-operator/watches.yaml create mode 100755 tools/scripts/fetch create mode 100644 tools/tools.go diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..fff4504 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,5 @@ + diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000..60892e2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,63 @@ +--- +name: Bug Report +about: If things aren't working as expected. +title: '' +labels: '' +assignees: '' + +--- + +## Bug Report + + + +#### What did you do? + + + +#### What did you expect to see? + + + +#### What did you see instead? Under which circumstances? 
+ + + +#### Environment + +**Operator type:** + + + + + + + +**Kubernetes cluster type:** + + + +`$ operator-sdk version` + + + +`$ go version` (if language is Go) + + + +`$ kubectl version` + + + +#### Possible Solution + + + +#### Additional context + + diff --git a/.github/ISSUE_TEMPLATE/doc-report.md b/.github/ISSUE_TEMPLATE/doc-report.md new file mode 100644 index 0000000..f0e6890 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/doc-report.md @@ -0,0 +1,28 @@ +--- +name: Doc Report +about: Raise an issue with the documentation. +title: '' +labels: kind/documentation +assignees: '' + +--- + + + +### What is the URL of the document? + + + +### Which section(s) is the issue in? + + + +### What needs fixing? + + + +#### Additional context + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000..1715844 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,28 @@ +--- +name: Feature Request +about: Suggest a feature +title: '' +labels: '' +assignees: '' + +--- + +## Feature Request + +#### Describe the problem you need a feature to resolve. + + + +#### Describe the solution you'd like. + + + + + + + + diff --git a/.github/ISSUE_TEMPLATE/support-question.md b/.github/ISSUE_TEMPLATE/support-question.md new file mode 100644 index 0000000..df58c8a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/support-question.md @@ -0,0 +1,69 @@ +--- +name: Support Question +about: Any support questions you might have. +title: '' +labels: '' +assignees: '' + +--- + + + +## Type of question + + + + + + + + +## Question + +#### What did you do? + + + +#### What did you expect to see? + + + +#### What did you see instead? Under which circumstances? 
+ + + +#### Environment + +**Operator type:** + + + + + + + +**Kubernetes cluster type:** + + + +`$ operator-sdk version` + + + +`$ go version` (if language is Go) + + + +`$ kubectl version` + + + +#### Additional context + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..8b9bcfb --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,25 @@ + + +**Description of the change:** + + +**Motivation for the change:** + + +**Checklist** + +If the pull request includes user-facing changes, extra documentation is required: +- [ ] Add a new changelog fragment in `changelog/fragments` (see [`changelog/fragments/00-template.yaml`](https://github.com/operator-framework/ansible-operator-plugins/tree/master/changelog/fragments/00-template.yaml)) +- [ ] Add or update relevant sections of the docs website in [`website/content/en/docs`](https://github.com/operator-framework/ansible-operator-plugins/tree/master/website/content/en/docs) diff --git a/.github/workflows/test-ansible.yml b/.github/workflows/test-ansible.yml new file mode 100644 index 0000000..cd1e7fc --- /dev/null +++ b/.github/workflows/test-ansible.yml @@ -0,0 +1,38 @@ +name: ansible +on: + pull_request: {} + +jobs: + e2e: + name: e2e + runs-on: ubuntu-22.04 + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: sudo rm -rf /usr/local/bin/kustomize + - run: make test-e2e-ansible + + e2e-molecule: + name: e2e-molecule + runs-on: ubuntu-22.04 + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: sudo rm -rf /usr/local/bin/kustomize + - uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Run test e2e ansible molecule + run: | + env + pip3 install --user --upgrade setuptools pip + pip3 install --user ansible-core~=2.15.0 + make test-e2e-ansible-molecule diff --git 
a/.github/workflows/test-sanity.yml b/.github/workflows/test-sanity.yml new file mode 100644 index 0000000..21074d0 --- /dev/null +++ b/.github/workflows/test-sanity.yml @@ -0,0 +1,18 @@ +name: sanity +on: + pull_request: {} + +jobs: + sanity: + name: sanity + runs-on: ubuntu-22.04 + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: sudo rm -rf /usr/local/bin/kustomize + - run: make test-sanity diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml new file mode 100644 index 0000000..eaeda5a --- /dev/null +++ b/.github/workflows/unit.yml @@ -0,0 +1,19 @@ +name: unit +on: + pull_request: {} + +jobs: + unit: + name: unit + runs-on: ubuntu-22.04 + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: make test-unit + - uses: shogo82148/actions-goveralls@v1 + with: + path-to-profile: coverage.out diff --git a/.gitignore b/.gitignore index e69de29..dab1d92 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,132 @@ +# Folders +.idea + +# Build artifacts +/build +/dist +**/bin/ + +# Test artifacts +**/testbin/ + +# CI GPG keyring +/.ci/gpg/keyring + +# Website +website/public/ +website/resources/ +website/node_modules/ +website/tech-doc-hugo + +# Ignore molecule samples testdata if it be generated in the testdata/ diretory +testdata/ansible/memcached-molecule-operator +testdata/ansible/advanced-molecule-operator + +# Trash files +*\.DS_Store + +# Created by https://www.toptal.com/developers/gitignore/api/go,vim,emacs,visualstudiocode +# Edit at https://www.toptal.com/developers/gitignore?templates=go,vim,emacs,visualstudiocode + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages 
+/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +# End of https://www.toptal.com/developers/gitignore/api/go,vim,emacs,visualstudiocode + +# Python cache (Ansible molecule) +test/ansible/plugins/filter/__pycache__/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..50290d3 --- /dev/null +++ b/Makefile @@ -0,0 +1,179 @@ +SHELL = /bin/bash + +# IMAGE_VERSION represents the ansible-operator, helm-operator, and scorecard subproject versions. +# This value must be updated to the release tag of the most recent release, a change that must +# occur in the release commit. IMAGE_VERSION will be removed once each subproject that uses this +# version is moved to a separate repo and release process. 
+export IMAGE_VERSION = v1.30.0 +# Build-time variables to inject into binaries +export SIMPLE_VERSION = $(shell (test "$(shell git describe --tags)" = "$(shell git describe --tags --abbrev=0)" && echo $(shell git describe --tags)) || echo $(shell git describe --tags --abbrev=0)+git) +export GIT_VERSION = $(shell git describe --dirty --tags --always) +export GIT_COMMIT = $(shell git rev-parse HEAD) +export K8S_VERSION = 1.26.0 + +# Build settings +export TOOLS_DIR = tools/bin +export SCRIPTS_DIR = tools/scripts +REPO = $(shell go list -m) +BUILD_DIR = build +GO_ASMFLAGS = -asmflags "all=-trimpath=$(shell dirname $(PWD))" +GO_GCFLAGS = -gcflags "all=-trimpath=$(shell dirname $(PWD))" +GO_BUILD_ARGS = \ + $(GO_GCFLAGS) $(GO_ASMFLAGS) \ + -ldflags " \ + -X '$(REPO)/internal/version.Version=$(SIMPLE_VERSION)' \ + -X '$(REPO)/internal/version.GitVersion=$(GIT_VERSION)' \ + -X '$(REPO)/internal/version.GitCommit=$(GIT_COMMIT)' \ + -X '$(REPO)/internal/version.KubernetesVersion=v$(K8S_VERSION)' \ + -X '$(REPO)/internal/version.ImageVersion=$(IMAGE_VERSION)' \ + " \ + +export GO111MODULE = on +export CGO_ENABLED = 0 +export PATH := $(PWD)/$(BUILD_DIR):$(PWD)/$(TOOLS_DIR):$(PATH) + +##@ Development + +.PHONY: generate +generate: build # Generate CLI docs and samples + rm -rf testdata + go run ./hack/generate/samples/generate_testdata.go + go generate ./... + +.PHONY: fix +fix: ## Fixup files in the repo. + go mod tidy + go fmt ./... + make setup-lint + $(TOOLS_DIR)/golangci-lint run --fix + +.PHONY: setup-lint +setup-lint: ## Setup the lint + $(SCRIPTS_DIR)/fetch golangci-lint 1.51.2 + +.PHONY: lint +lint: setup-lint ## Run the lint check + $(TOOLS_DIR)/golangci-lint run + +.PHONY: clean +clean: ## Cleanup build artifacts and tool binaries. 
+ rm -rf $(BUILD_DIR) dist $(TOOLS_DIR) + +##@ Build + +.PHONY: install +install: ## Install ansible-operator + go install $(GO_BUILD_ARGS) ./cmd/ansible-operator + +.PHONY: build +build: ## Build ansible-operator + @mkdir -p $(BUILD_DIR) + go build $(GO_BUILD_ARGS) -o $(BUILD_DIR) ./cmd/ansible-operator + +.PHONY: build/ansible-operator +build/ansible-operator: + go build $(GO_BUILD_ARGS) -o $(BUILD_DIR)/$(@F) ./cmd/$(@F) + +##@ Dev image build + +# Convenience wrapper for building all remotely hosted images. +.PHONY: image-build +IMAGE_TARGET_LIST = ansible-operator +image-build: $(foreach i,$(IMAGE_TARGET_LIST),image/$(i)) ## Build all images. + +# Convenience wrapper for building dependency base images. +.PHONY: image-build-base +IMAGE_BASE_TARGET_LIST = ansible-operator +image-build-base: $(foreach i,$(IMAGE_BASE_TARGET_LIST),image-base/$(i)) ## Build all images. + +# Build an image. +BUILD_IMAGE_REPO = quay.io/operator-framework +# When running in a terminal, this will be false. If true (ex. CI), print plain progress. +ifneq ($(shell test -t 0; echo $$?),0) +DOCKER_PROGRESS = --progress plain +endif +image/%: export DOCKER_CLI_EXPERIMENTAL = enabled +image/%: + docker buildx build $(DOCKER_PROGRESS) -t $(BUILD_IMAGE_REPO)/$*:dev -f ./images/$*/Dockerfile --load . + +image-base/%: export DOCKER_CLI_EXPERIMENTAL = enabled +image-base/%: + docker buildx build $(DOCKER_PROGRESS) -t $(BUILD_IMAGE_REPO)/$*-base:dev -f ./images/$*/base.Dockerfile --load images/$* +##@ Release + +## TODO: Add release targets here + +##@ Test + +.PHONY: test-all +test-all: test-static test-e2e ## Run all tests + +.PHONY: test-static +test-static: test-sanity test-unit ## Run all non-cluster-based tests + +.PHONY: test-sanity +test-sanity: generate fix ## Test repo formatting, linting, etc. + git diff --exit-code # fast-fail if generate or fix produced changes + ./hack/check-license.sh + ./hack/check-error-log-msg-format.sh + go vet ./... 
+ make setup-lint + make lint + git diff --exit-code # diff again to ensure other checks don't change repo + +.PHONY: test-docs +test-docs: ## Test doc links + go run ./release/changelog/gen-changelog.go -validate-only + git submodule update --init --recursive website/ + ./hack/check-links.sh + +.PHONY: test-unit +TEST_PKGS = $(shell go list ./... | grep -v -E 'github.com/operator-framework/ansible-operator-plugins/test/') +test-unit: ## Run unit tests + go test -coverprofile=coverage.out -covermode=count -short $(TEST_PKGS) + +e2e_tests := test-e2e-ansible test-e2e-ansible-molecule +e2e_targets := test-e2e $(e2e_tests) +.PHONY: $(e2e_targets) + +.PHONY: test-e2e-setup +export KIND_CLUSTER := osdk-test + +KUBEBUILDER_ASSETS = $(PWD)/$(shell go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && $(shell go env GOPATH)/bin/setup-envtest use $(K8S_VERSION) --bin-dir tools/bin/ -p path) +test-e2e-setup:: build dev-install cluster-create + +.PHONY: cluster-create +cluster-create:: + [[ "`$(TOOLS_DIR)/kind get clusters`" =~ "$(KIND_CLUSTER)" ]] || $(TOOLS_DIR)/kind create cluster --image="kindest/node:v$(K8S_VERSION)" --name $(KIND_CLUSTER) + +.PHONY: dev-install +dev-install:: + $(SCRIPTS_DIR)/fetch kind 0.17.0 + $(SCRIPTS_DIR)/fetch kubectl $(K8S_VERSION) # Install kubectl AFTER envtest because envtest includes its own kubectl binary + +.PHONY: test-e2e-teardown +test-e2e-teardown: + $(SCRIPTS_DIR)/fetch kind 0.17.0 + $(TOOLS_DIR)/kind delete cluster --name $(KIND_CLUSTER) + rm -f $(KUBECONFIG) + +# Double colon rules allow repeated rule declarations. +# Repeated rules are executed in the order they appear. +$(e2e_targets):: test-e2e-setup +test-e2e:: $(e2e_tests) ## Run e2e tests + +test-e2e-ansible:: image/ansible-operator ## Run Ansible e2e tests + go test -count=1 ./internal/ansible/proxy/... 
+ go test ./test/e2e/ansible -v -ginkgo.v +test-e2e-ansible-molecule:: install dev-install image/ansible-operator ## Run molecule-based Ansible e2e tests + go run ./hack/generate/samples/molecule/generate.go + ./hack/tests/e2e-ansible-molecule.sh + +.DEFAULT_GOAL := help +.PHONY: help +help: ## Show this help screen. + @echo 'Usage: make ... ' + @echo '' + @echo 'Available targets are:' + @echo '' + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/cmd/ansible-operator/main.go b/cmd/ansible-operator/main.go new file mode 100644 index 0000000..85325ee --- /dev/null +++ b/cmd/ansible-operator/main.go @@ -0,0 +1,43 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/spf13/cobra" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/operator-framework/ansible-operator-plugins/internal/cmd/ansible-operator/run" + "github.com/operator-framework/ansible-operator-plugins/internal/cmd/ansible-operator/version" +) + +func main() { + root := cobra.Command{ + Short: "Reconcile an Ansible operator project using ansible-runner", + Long: `This binary runs an Ansible operator that reconciles Kubernetes resources +managed by the ansible-runner program. 
It can be run either directly or from an Ansible +operator project's image entrypoint +`, + Use: "ansible-operator", + } + + root.AddCommand(run.NewCmd()) + root.AddCommand(version.NewCmd()) + + if err := root.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..c0ff83a --- /dev/null +++ b/go.mod @@ -0,0 +1,131 @@ +module github.com/operator-framework/ansible-operator-plugins + +go 1.19 + +require ( + github.com/go-logr/logr v1.2.3 + github.com/kr/text v0.2.0 + github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 + github.com/onsi/ginkgo/v2 v2.7.0 + github.com/onsi/gomega v1.24.2 + github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42 + github.com/operator-framework/operator-lib v0.11.1-0.20230306195046-28cadc6b6055 + github.com/operator-framework/operator-registry v1.28.0 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/afero v1.9.3 + github.com/spf13/cobra v1.6.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.10.0 + github.com/stretchr/testify v1.8.2 + github.com/thoas/go-funk v0.8.0 + golang.org/x/text v0.9.0 + k8s.io/api v0.26.2 + k8s.io/apiextensions-apiserver v0.26.2 + k8s.io/apimachinery v0.26.2 + k8s.io/client-go v0.26.2 + k8s.io/kubectl v0.26.2 + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 + sigs.k8s.io/controller-runtime v0.14.5 + sigs.k8s.io/kubebuilder/v3 v3.9.1 + sigs.k8s.io/yaml v1.3.0 +) + +require ( + github.com/Microsoft/hcsshim v0.9.4 // indirect + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + 
github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/containerd v1.7.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/cli v20.10.21+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gobuffalo/flect v1.0.0 // indirect + github.com/gofrs/uuid v4.0.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/cel-go v0.12.6 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/handlers v1.5.1 // indirect + github.com/h2non/filetype v1.1.1 // indirect + github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + 
github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect + go.opentelemetry.io/otel/sdk v1.14.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.9.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 // indirect + google.golang.org/grpc v1.53.0 // indirect + google.golang.org/protobuf v1.29.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/component-base v0.26.2 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // 
indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect +) + +replace ( + // TODO(ryantking): investigate further, v1.5 breaks github.com/deislabs/oras, might be able to update whatever uses the old version of oras + github.com/containerd/containerd => github.com/containerd/containerd v1.4.11 + // latest tag resolves to a very old version. this is only used for spinning up local test registries + github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d + github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..d045f22 --- /dev/null +++ b/go.sum @@ -0,0 +1,1104 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod 
h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= +github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d 
h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/bugsnag-go v1.5.3 h1:yeRUT3mUE13jL1tGwvoQsKdVbAsQx9AJ+fqahKveP04= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= 
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.4.11 h1:QCGOUN+i70jEEL/A6JVIbhy4f4fanzAzSR4kNG7SlcE= +github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= +github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d h1:jC8tT/S0OGx2cswpeUTn4gOIea8P08lD3VFQT0cOZ50= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod 
h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod 
h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/flect v1.0.0 h1:eBFmskjXZgAOagiTXJH25Nt5sdFwNRcb8DKZsIsAUQI= +github.com/gobuffalo/flect v1.0.0/go.mod h1:l9V6xSb4BlXwsxEMj3FVEub2nkdQjWhPvD8XTTlHPQc= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.16.1 h1:O+0C55RbMN66pWm5MjO6mw0px6usGpY0+bkSGW9zCo0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock 
v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic 
v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/h2non/filetype v1.1.1 h1:xvOwnXKAckvtLWsN398qS9QhlxlnVXBjXBydK2/UFB4= +github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= +github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= +github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= 
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod 
h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= +github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42 h1:d/Pnr19TnmIq3zQ6ebewC+5jt5zqYbRkvYd37YZENQY= +github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= 
+github.com/operator-framework/operator-lib v0.11.1-0.20230306195046-28cadc6b6055 h1:G9N8wEf9qDZ/4Fj5cbIejKUoFOYta0v72Yg8tPAdvc0= +github.com/operator-framework/operator-lib v0.11.1-0.20230306195046-28cadc6b6055/go.mod h1:A7xcxZPfdepC14FA5YyA+jpbAeD7q4awtT7mSAZntuU= +github.com/operator-framework/operator-registry v1.28.0 h1:vtmd2WgJxkx7vuuOxW4k5Le/oo0SfonSeJVMU3rKIfk= +github.com/operator-framework/operator-registry v1.28.0/go.mod h1:UYw3uaZyHwHgnczLRYmUqMpgRgP2EfkqOsaR+LI+nK8= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0 h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/thoas/go-funk v0.8.0 h1:JP9tKSvnpFVclYgDM0Is7FD9M4fhPvqA0s0BsXmzSRQ= +github.com/thoas/go-funk v0.8.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v1.1.0/go.mod 
h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod 
h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= +go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= 
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 
h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api 
v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/grpc 
v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= 
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/apiextensions-apiserver v0.26.2 h1:/yTG2B9jGY2Q70iGskMf41qTLhL9XeNN2KhI0uDgwko= +k8s.io/apiextensions-apiserver v0.26.2/go.mod h1:Y7UPgch8nph8mGCuVk0SK83LnS8Esf3n6fUBgew8SH8= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apiserver v0.26.2 h1:Pk8lmX4G14hYqJd1poHGC08G03nIHVqdJMR0SD3IH3o= +k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.26.2 
h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI= +k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4= +k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI= +sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= +sigs.k8s.io/controller-runtime v0.14.5/go.mod 
h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kubebuilder/v3 v3.9.1 h1:9JNKRg9GzlLBYwYRx1nQlwha8+Pd9gPyat1lj7T+jZw= +sigs.k8s.io/kubebuilder/v3 v3.9.1/go.mod h1:Z4boifT/XHIZTVEAIZaPTXqjhuK8Msx2iPYJy8ic6vg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/check-error-log-msg-format.sh b/hack/check-error-log-msg-format.sh new file mode 100755 index 0000000..7aef360 --- /dev/null +++ b/hack/check-error-log-msg-format.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -o nounset +set -o pipefail + +source "hack/lib/common.sh" + +echo "Checking format of error and log messages..." +allfiles=$(listFiles|grep -v ./internal/bindata/...) 
+log_case_output=$(grep -PRn '(Error\((.*[Ee]rr|nil), |^(?!.*(fmt|errors)).+\.Error(f)?\(|Fatal(f)?\(|Info(f)?\(|Warn(f)?\()"[[:lower:]]' $allfiles | sort -u) +if [ -n "${log_case_output}" ]; then + echo -e "Log messages do not begin with upper case:\n${log_case_output}" +fi +err_case_output=$(grep -ERn '(errors\.New|fmt\.Errorf)\("[[:upper:]]' $allfiles | sort -u) +if [ -n "${err_case_output}" ]; then + echo -e "Error messages do not begin with lower case:\n${err_case_output}" +fi +err_punct_output=$(grep -ERn '(errors\.New|fmt\.Errorf)\(".*\."' $allfiles | sort -u) +if [ -n "${err_punct_output}" ]; then + echo -e "Error messages should not have ending punctuation:\n${err_punct_output}" +fi + +if [[ -n "$log_case_output" || -n "$err_case_output" || -n "$err_punct_output" ]]; then + exit 255 +fi diff --git a/hack/check-license.sh b/hack/check-license.sh new file mode 100755 index 0000000..1c10c64 --- /dev/null +++ b/hack/check-license.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +source "hack/lib/common.sh" + +echo "Checking for license header..." +allfiles=$(listFiles|grep -v ./internal/bindata/...) +licRes="" +for file in $allfiles; do + if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED|Licensed)" ; then + licRes="${licRes}\n"$(echo -e " ${file}") + fi +done +if [ -n "${licRes}" ]; then + echo -e "license header checking failed:\n${licRes}" + exit 255 +fi diff --git a/hack/generate/samples/generate_testdata.go b/hack/generate/samples/generate_testdata.go new file mode 100644 index 0000000..6d82be2 --- /dev/null +++ b/hack/generate/samples/generate_testdata.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "os" + "path/filepath" + + "github.com/operator-framework/ansible-operator-plugins/hack/generate/samples/internal/ansible" + + log "github.com/sirupsen/logrus" + + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" +) + +func main() { + // binaryPath allow inform the binary that should be used. + // By default it is operator-sdk + var binaryPath string + + flag.StringVar(&binaryPath, "bin", testutils.BinaryName, "Binary path that should be used") + flag.Parse() + + // Make the binary path absolute if pathed, for reproducibility and debugging purposes. 
+ if dir, _ := filepath.Split(binaryPath); dir != "" { + tmp, err := filepath.Abs(binaryPath) + if err != nil { + log.Fatalf("Failed to make binary path %q absolute: %v", binaryPath, err) + } + binaryPath = tmp + } + + wd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + // samplesPath is the path where all samples should be generated + samplesPath := filepath.Join(wd, "testdata") + log.Infof("writing sample directories under %s", samplesPath) + + log.Infof("creating Ansible Memcached Sample") + ansible.GenerateMemcachedSamples(binaryPath, samplesPath) +} diff --git a/hack/generate/samples/internal/ansible/advanced_molecule.go b/hack/generate/samples/internal/ansible/advanced_molecule.go new file mode 100644 index 0000000..693b85b --- /dev/null +++ b/hack/generate/samples/internal/ansible/advanced_molecule.go @@ -0,0 +1,600 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ansible + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/hack/generate/samples/internal/pkg" +) + +// AdvancedMolecule defines the context for the sample +type AdvancedMolecule struct { + ctx *pkg.SampleContext +} + +// GenerateAdvancedMoleculeSample will call all actions to create the directory and generate the sample +// The Context to run the samples are not the same in the e2e test. In this way, note that it should NOT +// be called in the e2e tests since it will call the Prepare() to set the sample context and generate the files +// in the testdata directory. The e2e tests only ought to use the Run() method with the TestContext. +func GenerateAdvancedMoleculeSample(binaryPath, samplesPath string) { + ctx, err := pkg.NewSampleContext(binaryPath, filepath.Join(samplesPath, "advanced-molecule-operator"), + "GO111MODULE=on") + pkg.CheckError("generating Ansible Molecule Advanced Operator context", err) + + molecule := AdvancedMolecule{&ctx} + molecule.Prepare() + molecule.Run() +} + +// Prepare the Context for the Memcached Ansible Sample +// Note that sample directory will be re-created and the context data for the sample +// will be set such as the domain and GVK. 
+func (ma *AdvancedMolecule) Prepare() { + log.Infof("destroying directory for memcached Ansible samples") + ma.ctx.Destroy() + + log.Infof("creating directory") + err := ma.ctx.Prepare() + pkg.CheckError("creating directory for Advanced Molecule Sample", err) + + log.Infof("setting domain and GVK") + // nolint:goconst + ma.ctx.Domain = "example.com" + // nolint:goconst + ma.ctx.Version = "v1alpha1" + ma.ctx.Group = "test" + ma.ctx.Kind = "InventoryTest" +} + +// Run the steps to create the Memcached Ansible Sample +func (ma *AdvancedMolecule) Run() { + log.Infof("creating the project") + err := ma.ctx.Init( + "--plugins", "ansible", + "--group", ma.ctx.Group, + "--version", ma.ctx.Version, + "--kind", ma.ctx.Kind, + "--domain", ma.ctx.Domain, + "--generate-role", + "--generate-playbook") + pkg.CheckError("creating the project", err) + + log.Infof("enabling multigroup support") + err = ma.ctx.AllowProjectBeMultiGroup() + pkg.CheckError("updating PROJECT file", err) + + inventoryRoleTask := filepath.Join(ma.ctx.Dir, "roles", "inventorytest", "tasks", "main.yml") + log.Infof("inserting code to inventory role task") + const inventoryRoleTaskFragment = ` +- when: sentinel | test + block: + - kubernetes.core.k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: inventory-cm + namespace: '{{ meta.namespace }}' + data: + sentinel: '{{ sentinel }}' + groups: '{{ groups | to_nice_yaml }}'` + err = kbutil.ReplaceInFile( + inventoryRoleTask, + "# tasks file for InventoryTest", + inventoryRoleTaskFragment) + pkg.CheckError("replacing inventory task", err) + + log.Infof("updating inventorytest sample") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "samples", "test_v1alpha1_inventorytest.yaml"), + "name: inventorytest-sample", + inventorysampleFragment) + pkg.CheckError("updating inventorytest sample", err) + + log.Infof("updating spec of inventorytest sample") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", 
"samples", "test_v1alpha1_inventorytest.yaml"), + "# TODO(user): Add fields here", + "size: 3") + pkg.CheckError("updating spec of inventorytest sample", err) + + ma.addPlaybooks() + ma.updatePlaybooks() + ma.addMocksFromTestdata() + ma.updateDockerfile() + ma.updateConfig() +} + +func (ma *AdvancedMolecule) updateConfig() { + log.Infof("adding customized roles") + const cmRolesFragment = ` ## + ## Base operator rules + ## + - apiGroups: + - "" + resources: + - configmaps + - namespaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +#+kubebuilder:scaffold:rules` + err := kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "rbac", "role.yaml"), + "#+kubebuilder:scaffold:rules", + cmRolesFragment) + pkg.CheckError("adding customized roles", err) + + log.Infof("adding manager arg") + const ansibleVaultArg = ` + - --ansible-args='--vault-password-file /opt/ansible/pwd.yml'` + err = kbutil.InsertCode( + filepath.Join(ma.ctx.Dir, "config", "manager", "manager.yaml"), + "- --leader-election-id=advanced-molecule-operator", + ansibleVaultArg) + pkg.CheckError("adding manager arg", err) + + log.Infof("adding manager env") + const managerEnv = ` + - name: ANSIBLE_DEBUG_LOGS + value: "TRUE" + - name: ANSIBLE_INVENTORY + value: /opt/ansible/inventory` + err = kbutil.InsertCode( + filepath.Join(ma.ctx.Dir, "config", "manager", "manager.yaml"), + "value: explicit", + managerEnv) + pkg.CheckError("adding manager env", err) + + log.Infof("adding vaulting args to the proxy auth") + const managerAuthArgs = ` + - "--ansible-args='--vault-password-file /opt/ansible/pwd.yml'"` + err = kbutil.InsertCode( + filepath.Join(ma.ctx.Dir, "config", "default", "manager_auth_proxy_patch.yaml"), + "- \"--leader-election-id=advanced-molecule-operator\"", + managerAuthArgs) + pkg.CheckError("adding vaulting args to the proxy 
auth", err) + + log.Infof("adding task to not pull image to the config/testing") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "testing", "kustomization.yaml"), + "- manager_image.yaml", + "- manager_image.yaml\n- pull_policy/Never.yaml") + pkg.CheckError("adding task to not pull image to the config/testing", err) +} + +func (ma *AdvancedMolecule) addMocksFromTestdata() { + log.Infof("adding ansible.cfg") + cmd := exec.Command("cp", "../../../hack/generate/samples/internal/ansible/testdata/ansible.cfg", ma.ctx.Dir) + _, err := ma.ctx.Run(cmd) + pkg.CheckError("adding ansible.cfg", err) + + log.Infof("adding plugins/") + cmd = exec.Command("cp", "-r", "../../../hack/generate/samples/internal/ansible/testdata/plugins/", filepath.Join(ma.ctx.Dir, "plugins/")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding plugins/", err) + + log.Infof("adding fixture_collection/") + cmd = exec.Command("cp", "-r", "../../../hack/generate/samples/internal/ansible/testdata/fixture_collection/", filepath.Join(ma.ctx.Dir, "fixture_collection/")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding fixture_collection/", err) + + log.Infof("replacing watches.yaml") + cmd = exec.Command("cp", "-r", "../../../hack/generate/samples/internal/ansible/testdata/watches.yaml", ma.ctx.Dir) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("replacing watches.yaml", err) + + log.Infof("adding tasks/") + cmd = exec.Command("cp", "-r", "../../../hack/generate/samples/internal/ansible/testdata/tasks/", filepath.Join(ma.ctx.Dir, "molecule/default/")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding tasks/", err) + + log.Infof("adding secret playbook") + cmd = exec.Command("cp", "-r", "../../../hack/generate/samples/internal/ansible/testdata/secret.yml", filepath.Join(ma.ctx.Dir, "playbooks/secret.yml")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding secret playbook", err) + + log.Infof("adding inventory/") + cmd = exec.Command("cp", "-r", 
"../../../hack/generate/samples/internal/ansible/testdata/inventory/", filepath.Join(ma.ctx.Dir, "inventory/")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding inventory/", err) + + log.Infof("adding finalizer for finalizerconcurrencytest") + cmd = exec.Command("cp", "../../../hack/generate/samples/internal/ansible/testdata/playbooks/finalizerconcurrencyfinalizer.yml", filepath.Join(ma.ctx.Dir, "playbooks/finalizerconcurrencyfinalizer.yml")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("adding finalizer for finalizerconccurencytest", err) + +} + +func (ma *AdvancedMolecule) updateDockerfile() { + log.Infof("replacing project Dockerfile to use ansible base image with the dev tag") + err := kbutil.ReplaceRegexInFile( + filepath.Join(ma.ctx.Dir, "Dockerfile"), + "quay.io/operator-framework/ansible-operator:.*", + "quay.io/operator-framework/ansible-operator:dev") + pkg.CheckError("replacing Dockerfile", err) + + log.Infof("inserting code to Dockerfile") + const dockerfileFragment = ` + +# Customizations done to check advanced scenarios +COPY inventory/ ${HOME}/inventory/ +COPY plugins/ ${HOME}/plugins/ +COPY ansible.cfg /etc/ansible/ansible.cfg +COPY fixture_collection/ /tmp/fixture_collection/ +USER root +RUN chmod -R ug+rwx /tmp/fixture_collection +USER 1001 +RUN ansible-galaxy collection build /tmp/fixture_collection/ --output-path /tmp/fixture_collection/ \ + && ansible-galaxy collection install /tmp/fixture_collection/operator_sdk-test_fixtures-0.0.0.tar.gz +RUN echo abc123 > /opt/ansible/pwd.yml \ + && ansible-vault encrypt_string --vault-password-file /opt/ansible/pwd.yml 'thisisatest' --name 'the_secret' > /opt/ansible/vars.yml +` + err = kbutil.InsertCode( + filepath.Join(ma.ctx.Dir, "Dockerfile"), + "COPY playbooks/ ${HOME}/playbooks/", + dockerfileFragment) + pkg.CheckError("replacing Dockerfile", err) +} + +func (ma *AdvancedMolecule) updatePlaybooks() { + log.Infof("adding playbook for argstest") + const argsPlaybook = `--- +- hosts: localhost + 
gather_facts: no + collections: + - kubernetes.core + tasks: + - name: Get the decrypted message variable + include_vars: + file: /opt/ansible/vars.yml + name: the_secret + - name: Create configmap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + msg: The decrypted value is {{the_secret.the_secret}} +` + err := kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "argstest.yml"), + originalPlaybookFragment, + argsPlaybook) + pkg.CheckError("adding playbook for argstest", err) + + log.Infof("adding playbook for casetest") + const casePlaybook = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + tasks: + - name: Create configmap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + shouldBeCamel: '{{ camelCaseVar | default("false") }}' +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "casetest.yml"), + originalPlaybookFragment, + casePlaybook) + pkg.CheckError("adding playbook for casetest", err) + + log.Infof("adding playbook for inventorytest") + const inventoryPlaybook = `--- +- hosts: test + gather_facts: no + tasks: + - import_role: + name: "inventorytest" + +- hosts: localhost + gather_facts: no + tasks: + - command: echo hello + - debug: msg='{{ "hello" | test }}'` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "inventorytest.yml"), + "---\n- hosts: localhost\n gather_facts: no\n collections:\n - kubernetes.core\n - operator_sdk.util\n tasks:\n - import_role:\n name: \"inventorytest\"", + inventoryPlaybook) + pkg.CheckError("adding playbook for inventorytest", err) + + log.Infof("adding playbook for reconciliationtest") + const reconciliationPlaybook = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + tasks: + - name: retrieve configmap + k8s_info: + api_version: v1 + 
kind: ConfigMap + namespace: '{{ meta.namespace }}' + name: '{{ meta.name }}' + register: configmap + + - name: create configmap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + iterations: '1' + when: configmap.resources|length == 0 + + - name: Update ConfigMap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + iterations: '{{ (configmap.resources.0.data.iterations|int) + 1 }}' + when: configmap.resources|length > 0 and (configmap.resources.0.data.iterations|int) < 5 + + - name: retrieve configmap + k8s_info: + api_version: v1 + kind: ConfigMap + namespace: '{{ meta.namespace }}' + name: '{{ meta.name }}' + register: configmap + + - name: Using the requeue_after module + operator_sdk.util.requeue_after: + time: 1s + when: configmap.resources|length > 0 and (configmap.resources.0.data.iterations|int) < 5 +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "reconciliationtest.yml"), + originalPlaybookFragment, + reconciliationPlaybook) + pkg.CheckError("adding playbook for reconciliationtest", err) + + log.Infof("adding playbook for selectortest") + const selectorPlaybook = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + tasks: + - name: Create configmap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + hello: "world" +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "selectortest.yml"), + originalPlaybookFragment, + selectorPlaybook) + pkg.CheckError("adding playbook for selectortest", err) + + log.Infof("adding playbook for subresourcestest") + const subresourcesPlaybook = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + - operator_sdk.util + + tasks: + - name: Deploy busybox pod + k8s: + 
definition: + apiVersion: v1 + kind: Pod + metadata: + name: '{{ meta.name }}-busybox' + namespace: '{{ meta.namespace }}' + spec: + containers: + - image: busybox + name: sleep + args: + - "/bin/sh" + - "-c" + - "while true ; do echo '{{ log_message }}' ; sleep 5 ; done" + wait: yes + + - name: Execute command in busybox pod + k8s_exec: + namespace: '{{ meta.namespace }}' + pod: '{{ meta.name }}-busybox' + command: '{{ exec_command }}' + register: exec_result + + - name: Get logs from busybox pod + k8s_log: + name: '{{ meta.name }}-busybox' + namespace: '{{ meta.namespace }}' + register: log_result + + - name: Write results to resource status + k8s_status: + api_version: test.example.com/v1alpha1 + kind: SubresourcesTest + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + status: + execCommandStdout: '{{ exec_result.stdout.strip() }}' + execCommandStderr: '{{ exec_result.stderr.strip() }}' + logs: '{{ log_result.log }}' +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "subresourcestest.yml"), + originalPlaybookFragment, + subresourcesPlaybook) + pkg.CheckError("adding playbook for subresourcestest", err) + + log.Infof("adding playbook for clusterannotationtest") + const clusterAnnotationTest = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + tasks: + + - name: create externalnamespace + k8s: + name: "externalnamespace" + api_version: v1 + kind: "Namespace" + definition: + metadata: + labels: + foo: bar + + - name: create configmap + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + namespace: "externalnamespace" + name: '{{ meta.name }}' + data: + foo: bar +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "clusterannotationtest.yml"), + originalPlaybookFragment, + clusterAnnotationTest) + pkg.CheckError("adding playbook for clusterannotationtest", err) + + log.Infof("adding playbook for finalizerconcurrencytest") + const finalizerConcurrencyTest = `--- +- 
hosts: localhost + gather_facts: no + collections: + - kubernetes.core + - operator_sdk.util + + tasks: + - debug: + msg: "Pausing until configmap exists" + + - name: Wait for configmap + k8s_info: + apiVersion: v1 + kind: ConfigMap + name: unpause-reconciliation + namespace: osdk-test + wait: yes + wait_sleep: 10 + wait_timeout: 360 + + - debug: + msg: "Unpause!" +` + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "playbooks", "finalizerconcurrencytest.yml"), + originalPlaybookFragment, + finalizerConcurrencyTest) + pkg.CheckError("adding playbook for finalizerconcurrencytest", err) +} + +func (ma *AdvancedMolecule) addPlaybooks() { + allPlaybookKinds := []string{ + "ArgsTest", + "CaseTest", + "CollectionTest", + "ClusterAnnotationTest", + "FinalizerConcurrencyTest", + "ReconciliationTest", + "SelectorTest", + "SubresourcesTest", + } + + // Create API + for _, k := range allPlaybookKinds { + logMsgForKind := fmt.Sprintf("creating an API %s", k) + log.Infof(logMsgForKind) + err := ma.ctx.CreateAPI( + "--group", ma.ctx.Group, + "--version", ma.ctx.Version, + "--kind", k, + "--generate-playbook") + pkg.CheckError(logMsgForKind, err) + + k = strings.ToLower(k) + task := fmt.Sprintf("%s_test.yml", k) + logMsgForKind = fmt.Sprintf("removing FIXME assert from %s", task) + log.Infof(logMsgForKind) + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", task), + fixmeAssert, + "") + pkg.CheckError(logMsgForKind, err) + } +} + +const originalPlaybookFragment = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + - operator_sdk.util + tasks: [] +` + +const inventorysampleFragment = `name: inventorytest-sample + annotations: + "ansible.sdk.operatorframework.io/verbosity": "0"` diff --git a/hack/generate/samples/internal/ansible/constants.go b/hack/generate/samples/internal/ansible/constants.go new file mode 100644 index 0000000..d073114 --- /dev/null +++ 
b/hack/generate/samples/internal/ansible/constants.go @@ -0,0 +1,588 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ansible + +const roleFragment = ` +- name: start memcached + kubernetes.core.k8s: + definition: + kind: Deployment + apiVersion: apps/v1 + metadata: + name: '{{ ansible_operator_meta.name }}-memcached' + namespace: '{{ ansible_operator_meta.namespace }}' + labels: + app: memcached + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + replicas: "{{size}}" + selector: + matchLabels: + app: memcached + template: + metadata: + labels: + app: memcached + spec: + containers: + - name: memcached + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + command: + - memcached + - -m=64 + - -o + - modern + - -v + image: "docker.io/memcached:1.4.36-alpine" + ports: + - containerPort: 11211 + readinessProbe: + tcpSocket: + port: 11211 + initialDelaySeconds: 3 + periodSeconds: 3 + +- name: Check if config exists + ansible.builtin.stat: + path: /tmp/metricsbumped + register: metricsbumped + +# Only run once +- block: + - ansible.builtin.file: + path: /tmp/metricsbumped + state: touch + # Sanity + - name: create sanity_counter + operator_sdk.util.osdk_metric: + name: sanity_counter + description: ensure counter can be created + counter: {} + + - name: create sanity_gauge + operator_sdk.util.osdk_metric: + name: sanity_gauge + 
description: ensure gauge can be created + gauge: {} + + - name: create sanity_histogram + operator_sdk.util.osdk_metric: + name: sanity_histogram + description: ensure histogram can be created + histogram: {} + + - name: create sanity_summary + operator_sdk.util.osdk_metric: + name: sanity_summary + description: ensure summary can be created + summary: {} + + # Counter + - name: Counter increment test setup + operator_sdk.util.osdk_metric: + name: counter_inc_test + description: create counter to be incremented + counter: {} + + - name: Execute Counter increment test + operator_sdk.util.osdk_metric: + name: counter_inc_test + description: increment counter + counter: + increment: yes + + - name: Counter add test setup + operator_sdk.util.osdk_metric: + name: counter_add_test + description: create counter to be added to + counter: {} + + - name: Counter add test exe + operator_sdk.util.osdk_metric: + name: counter_add_test + description: create counter to be incremented + counter: + add: 2 + + # Gauge + - name: Gauge set test + operator_sdk.util.osdk_metric: + name: gauge_set_test + description: create and set a gauge t0 5 + gauge: + set: 5 + + - name: Gauge add test setup + operator_sdk.util.osdk_metric: + name: gauge_add_test + description: create a gauge + gauge: {} + + - name: Gauge add test + operator_sdk.util.osdk_metric: + name: gauge_add_test + description: Add 7 to the gauge + gauge: + add: 7 + + - name: Gauge subtract test setup + operator_sdk.util.osdk_metric: + name: gauge_sub_test + description: create a gauge + gauge: {} + + - name: Gauge sub test + operator_sdk.util.osdk_metric: + name: gauge_sub_test + description: Add 7 to the gauge + gauge: + subtract: 7 + + - name: Gauge time test + operator_sdk.util.osdk_metric: + name: gauge_time_test + description: set the gauge to current time + gauge: + set_to_current_time: yes + + # Summary + - name: Summary test setup + operator_sdk.util.osdk_metric: + name: summary_test + description: create a summary + 
summary: {} + + - name: Summary test + operator_sdk.util.osdk_metric: + name: summary_test + description: observe a summary + summary: + observe: 2 + + # Histogram + - name: Histogram test setup + operator_sdk.util.osdk_metric: + name: histogram_test + description: create a histogram + histogram: {} + + - name: Histogram test + operator_sdk.util.osdk_metric: + name: histogram_test + description: observe a histogram + histogram: + observe: 2 + when: not metricsbumped.stat.exists +` +const defaultsFragment = `size: 1` + +const moleculeTaskFragment = `- name: Load CR + set_fact: + custom_resource: "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}" + vars: + cr_file: 'cache_v1alpha1_memcached.yaml' + +- name: Create the cache.example.com/v1alpha1.Memcached + k8s: + state: present + namespace: '{{ namespace }}' + definition: '{{ custom_resource }}' + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + +- name: Wait 2 minutes for memcached deployment + debug: + var: deploy + until: + - deploy is defined + - deploy.status is defined + - deploy.status.replicas is defined + - deploy.status.replicas == deploy.status.get("availableReplicas", 0) + retries: 12 + delay: 10 + vars: + deploy: '{{ lookup("k8s", + kind="Deployment", + api_version="apps/v1", + namespace=namespace, + label_selector="app=memcached" + )}}' + +- name: Verify custom status exists + assert: + that: debug_cr.status.get("test") == "hello world" + vars: + debug_cr: '{{ lookup("k8s", + kind=custom_resource.kind, + api_version=custom_resource.apiVersion, + namespace=namespace, + resource_name=custom_resource.metadata.name + )}}' + +- when: molecule_yml.scenario.name == "test-local" + block: + - name: Restart the operator by killing the pod + k8s: + state: absent + definition: + api_version: v1 + kind: Pod + metadata: + namespace: '{{ namespace }}' + name: '{{ pod.metadata.name }}' + vars: + pod: '{{ q("k8s", api_version="v1", kind="Pod", 
namespace=namespace, label_selector="name=%s").0 }}' + + - name: Wait 2 minutes for operator deployment + debug: + var: deploy + until: + - deploy is defined + - deploy.status is defined + - deploy.status.replicas is defined + - deploy.status.replicas == deploy.status.get("availableReplicas", 0) + retries: 12 + delay: 10 + vars: + deploy: '{{ lookup("k8s", + kind="Deployment", + api_version="apps/v1", + namespace=namespace, + resource_name="%s" + )}}' + + - name: Wait for reconciliation to have a chance at finishing + pause: + seconds: 15 + + - name: Delete the service that is created. + k8s: + kind: Service + api_version: v1 + namespace: '{{ namespace }}' + name: test-service + state: absent + + - name: Verify that test-service was re-created + debug: + var: service + until: service + retries: 12 + delay: 10 + vars: + service: '{{ lookup("k8s", + kind="Service", + api_version="v1", + namespace=namespace, + resource_name="test-service", + )}}' + +- name: Delete the custom resource + k8s: + state: absent + namespace: '{{ namespace }}' + definition: '{{ custom_resource }}' + +- name: Wait for the custom resource to be deleted + k8s_info: + api_version: '{{ custom_resource.apiVersion }}' + kind: '{{ custom_resource.kind }}' + namespace: '{{ namespace }}' + name: '{{ custom_resource.metadata.name }}' + register: cr + retries: 10 + delay: 6 + until: not cr.resources + failed_when: cr.resources + +- name: Verify the Deployment was deleted (wait 30s) + assert: + that: not lookup('k8s', kind='Deployment', api_version='apps/v1', namespace=namespace, label_selector='app=memcached') + retries: 10 + delay: 3 +` + +const memcachedCustomStatusMoleculeTarget = `- name: Verify custom status exists + assert: + that: debug_cr.status.get("test") == "hello world" + vars: + debug_cr: '{{ lookup("k8s", + kind=custom_resource.kind, + api_version=custom_resource.apiVersion, + namespace=namespace, + resource_name=custom_resource.metadata.name + )}}'` + +// false positive: G101: Potential 
hardcoded credentials (gosec) +// nolint:gosec +const testSecretMoleculeCheck = ` + +# This will verify that the secret role was executed +- name: Verify that test-service was created + assert: + that: lookup('k8s', kind='Service', api_version='v1', namespace=namespace, resource_name='test-service') +` + +const testFooMoleculeCheck = ` + +- name: Verify that project testing-foo was created + assert: + that: lookup('k8s', kind='Namespace', api_version='v1', resource_name='testing-foo') + when: "'project.openshift.io' in lookup('k8s', cluster_info='api_groups')" +` + +// false positive: G101: Potential hardcoded credentials (gosec) +// nolint:gosec +const originalTaskSecret = `--- +# tasks file for Secret +` + +// false positive: G101: Potential hardcoded credentials (gosec) +// nolint:gosec +const taskForSecret = `- name: Create test service + kubernetes.core.k8s: + definition: + kind: Service + api_version: v1 + metadata: + name: test-service + namespace: default + spec: + ports: + - protocol: TCP + port: 8332 + targetPort: 8332 + name: rpc + +` + +// false positive: G101: Potential hardcoded credentials (gosec) +// nolint:gosec +const manageStatusFalseForRoleSecret = `role: secret + manageStatus: false` + +const fixmeAssert = ` +- name: Add assertions here + assert: + that: false + fail_msg: FIXME Add real assertions for your operator +` + +const originaMemcachedMoleculeTask = `- name: Create the cache.example.com/v1alpha1.Memcached + k8s: + state: present + namespace: '{{ namespace }}' + definition: "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}" + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + vars: + cr_file: 'cache_v1alpha1_memcached.yaml' + +- name: Add assertions here + assert: + that: false + fail_msg: FIXME Add real assertions for your operator` + +const targetMoleculeCheckDeployment = `- name: Wait 2 minutes for memcached deployment + debug: + var: deploy + until: + - deploy is defined + - 
deploy.status is defined + - deploy.status.replicas is defined + - deploy.status.replicas == deploy.status.get("availableReplicas", 0) + retries: 12 + delay: 10 + vars: + deploy: '{{ lookup("k8s", + kind="Deployment", + api_version="apps/v1", + namespace=namespace, + label_selector="app=memcached" + )}}'` + +const molecuTaskToCheckConfigMap = ` +- name: Create ConfigMap that the Operator should delete + k8s: + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: deleteme + namespace: '{{ namespace }}' + data: + delete: me +` + +const memcachedWithBlackListTask = ` +- operator_sdk.util.k8s_status: + api_version: cache.example.com/v1alpha1 + kind: Memcached + name: "{{ ansible_operator_meta.name }}" + namespace: "{{ ansible_operator_meta.namespace }}" + status: + test: "hello world" + +- kubernetes.core.k8s: + definition: + kind: Secret + apiVersion: v1 + metadata: + name: test-secret + namespace: "{{ ansible_operator_meta.namespace }}" + data: + test: aGVsbG8K +- name: Get cluster api_groups + set_fact: + api_groups: "{{ lookup('kubernetes.core.k8s', cluster_info='api_groups', kubeconfig=lookup('env', 'K8S_AUTH_KUBECONFIG')) }}" + +- name: create project if projects are available + kubernetes.core.k8s: + definition: + apiVersion: project.openshift.io/v1 + kind: Project + metadata: + name: testing-foo + when: "'project.openshift.io' in api_groups" + +- name: Create ConfigMap to test blacklisted watches + kubernetes.core.k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: test-blacklist-watches + namespace: "{{ ansible_operator_meta.namespace }}" + data: + arbitrary: afdasdfsajsafj + state: present` + +const taskToDeleteConfigMap = `- name: delete configmap for test + kubernetes.core.k8s: + kind: ConfigMap + api_version: v1 + name: deleteme + namespace: default + state: absent` + +const memcachedWatchCustomizations = `playbook: playbooks/memcached.yml + finalizer: + name: cache.example.com/finalizer + role: memfin + blacklist: + - 
group: "" + version: v1 + kind: ConfigMap` + +const rolesForBaseOperator = ` + ## + ## Apply customize roles for base operator + ## + - apiGroups: + - "" + resources: + - configmaps + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +#+kubebuilder:scaffold:rules +` + +const customMetricsTest = ` +- name: Search for all running pods + kubernetes.core.k8s_info: + kind: Pod + label_selectors: + - "control-plane = controller-manager" + register: output +- name: Curl the metrics from the manager + kubernetes.core.k8s_exec: + namespace: default + container: manager + pod: "{{ output.resources[0].metadata.name }}" + command: curl localhost:8080/metrics + register: metrics_output + +- name: Assert sanity metrics were created + assert: + that: + - "'sanity_counter 0' in metrics_output.stdout" + - "'sanity_gauge 0' in metrics_output.stdout" + - "'sanity_histogram_bucket' in metrics_output.stdout" + - "'sanity_summary summary' in metrics_output.stdout" + +- name: Assert Counter works as expected + assert: + that: + - "'counter_inc_test 1' in metrics_output.stdout" + - "'counter_add_test 2' in metrics_output.stdout" + +- name: Assert Gauge works as expected + assert: + that: + - "'gauge_set_test 5' in metrics_output.stdout" + - "'gauge_add_test 7' in metrics_output.stdout" + - "'gauge_sub_test -7' in metrics_output.stdout" + # result is epoch time in seconds so the first digit is good until 2033 + - "'gauge_time_test 1' in metrics_output.stdout" + +- name: Assert Summary works as expected + assert: + that: + - "'summary_test_sum 2' in metrics_output.stdout" + +- name: Assert Histogram works as expected + assert: + that: + - "'histogram_test_sum 2' in metrics_output.stdout" + +` + +const watchNamespacePatch = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: 
metadata.namespace +` diff --git a/hack/generate/samples/internal/ansible/generate.go b/hack/generate/samples/internal/ansible/generate.go new file mode 100644 index 0000000..2d0321e --- /dev/null +++ b/hack/generate/samples/internal/ansible/generate.go @@ -0,0 +1,23 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ansible + +import ( + "path/filepath" +) + +func GenerateMemcachedSamples(binaryPath, rootPath string) { + GenerateMemcachedSample(binaryPath, filepath.Join(rootPath, "ansible")) +} diff --git a/hack/generate/samples/internal/ansible/memcached.go b/hack/generate/samples/internal/ansible/memcached.go new file mode 100644 index 0000000..f2d0b37 --- /dev/null +++ b/hack/generate/samples/internal/ansible/memcached.go @@ -0,0 +1,136 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ansible + +import ( + "fmt" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/hack/generate/samples/internal/pkg" +) + +// Memcached defines the context for the sample +type Memcached struct { + ctx *pkg.SampleContext +} + +// GenerateMemcachedSample will call all actions to create the directory and generate the sample +// The Context to run the samples are not the same in the e2e test. In this way, note that it should NOT +// be called in the e2e tests since it will call the Prepare() to set the sample context and generate the files +// in the testdata directory. The e2e tests only ought to use the Run() method with the TestContext. +func GenerateMemcachedSample(binaryPath, samplesPath string) { + ctx, err := pkg.NewSampleContext(binaryPath, filepath.Join(samplesPath, "memcached-operator"), + "GO111MODULE=on") + pkg.CheckError("generating Ansible memcached context", err) + + memcached := Memcached{&ctx} + memcached.Prepare() + memcached.Run() +} + +// Prepare the Context for the Memcached Ansible Sample +// Note that sample directory will be re-created and the context data for the sample +// will be set such as the domain and GVK. 
+func (ma *Memcached) Prepare() { + log.Infof("destroying directory for memcached Ansible samples") + ma.ctx.Destroy() + + log.Infof("creating directory") + err := ma.ctx.Prepare() + pkg.CheckError("creating directory for Ansible Sample", err) + + log.Infof("setting domain and GVK") + ma.ctx.Domain = "example.com" + ma.ctx.Version = "v1alpha1" + ma.ctx.Group = "cache" + ma.ctx.Kind = "Memcached" +} + +// Run the steps to create the Memcached Ansible Sample +func (ma *Memcached) Run() { + log.Infof("creating the project") + err := ma.ctx.Init( + "--plugins", "ansible", + "--group", ma.ctx.Group, + "--version", ma.ctx.Version, + "--kind", ma.ctx.Kind, + "--domain", ma.ctx.Domain, + "--generate-role", + "--generate-playbook") + pkg.CheckError("creating the project", err) + + log.Infof("customizing the sample") + err = kbutil.UncommentCode( + filepath.Join(ma.ctx.Dir, "config", "default", "kustomization.yaml"), + "#- ../prometheus", "#") + pkg.CheckError("enabling prometheus metrics", err) + + err = ma.ctx.UncommentRestrictivePodStandards() + pkg.CheckError("creating the bundle", err) + + ma.addingAnsibleTask() + ma.addingMoleculeMockData() + + log.Infof("creating the bundle") + err = ma.ctx.GenerateBundle() + pkg.CheckError("creating the bundle", err) + + log.Infof("striping bundle annotations") + err = ma.ctx.StripBundleAnnotations() + pkg.CheckError("striping bundle annotations", err) + + log.Infof("setting createdAt annotation") + csv := filepath.Join(ma.ctx.Dir, "bundle", "manifests", ma.ctx.ProjectName+".clusterserviceversion.yaml") + err = kbutil.ReplaceRegexInFile(csv, "createdAt:.*", createdAt) + pkg.CheckError("setting createdAt annotation", err) +} + +// addingMoleculeMockData will customize the molecule data +func (ma *Memcached) addingMoleculeMockData() { + log.Infof("adding molecule test for Ansible task") + moleculeTaskPath := filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", + fmt.Sprintf("%s_test.yml", strings.ToLower(ma.ctx.Kind))) + + err := 
kbutil.ReplaceInFile(moleculeTaskPath, + originaMemcachedMoleculeTask, fmt.Sprintf(moleculeTaskFragment, ma.ctx.ProjectName, ma.ctx.ProjectName)) + pkg.CheckError("replacing molecule default tasks", err) +} + +// addingAnsibleTask will add the Ansible Task and update the sample +func (ma *Memcached) addingAnsibleTask() { + log.Infof("adding Ansible task and variable") + err := kbutil.InsertCode(filepath.Join(ma.ctx.Dir, "roles", strings.ToLower(ma.ctx.Kind), + "tasks", "main.yml"), + fmt.Sprintf("# tasks file for %s", ma.ctx.Kind), + roleFragment) + pkg.CheckError("adding task", err) + + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "roles", strings.ToLower(ma.ctx.Kind), + "defaults", "main.yml"), + fmt.Sprintf("# defaults file for %s", ma.ctx.Kind), + defaultsFragment) + pkg.CheckError("adding defaulting", err) + + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "config", "samples", + fmt.Sprintf("%s_%s_%s.yaml", ma.ctx.Group, ma.ctx.Version, strings.ToLower(ma.ctx.Kind))), + "# TODO(user): Add fields here", "size: 1") + pkg.CheckError("updating sample CR", err) +} + +const createdAt = `createdAt: "2022-11-08T17:26:37Z"` diff --git a/hack/generate/samples/internal/ansible/memcached_molecule.go b/hack/generate/samples/internal/ansible/memcached_molecule.go new file mode 100644 index 0000000..053c76a --- /dev/null +++ b/hack/generate/samples/internal/ansible/memcached_molecule.go @@ -0,0 +1,210 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ansible + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/hack/generate/samples/internal/pkg" +) + +// MemcachedMolecule defines the context for the sample +type MemcachedMolecule struct { + ctx *pkg.SampleContext + Base Memcached +} + +// GenerateMoleculeSample will call all actions to create the directory and generate the sample +// The Context to run the samples are not the same in the e2e test. In this way, note that it should NOT +// be called in the e2e tests since it will call the Prepare() to set the sample context and generate the files +// in the testdata directory. The e2e tests only ought to use the Run() method with the TestContext. +func GenerateMoleculeSample(binaryPath, samplesPath string) { + ctx, err := pkg.NewSampleContext(binaryPath, filepath.Join(samplesPath, "memcached-molecule-operator"), + "GO111MODULE=on") + pkg.CheckError("generating Ansible Moleule memcached context", err) + + molecule := MemcachedMolecule{ctx: &ctx, Base: Memcached{&ctx}} + molecule.Prepare() + molecule.Run() +} + +// Prepare the Context for the Memcached Ansible Sample +// Note that sample directory will be re-created and the context data for the sample +// will be set such as the domain and GVK. 
+func (ma *MemcachedMolecule) Prepare() { + log.Infof("destroying directory for memcached Ansible samples") + ma.ctx.Destroy() + + log.Infof("creating directory") + err := ma.ctx.Prepare() + pkg.CheckError("creating directory for Ansible Sample", err) + + log.Infof("setting domain and GVK") + ma.ctx.Domain = "example.com" + ma.ctx.Version = "v1alpha1" + ma.ctx.Group = "cache" + ma.ctx.Kind = "Memcached" +} + +// Run the steps to create the Memcached Ansible Sample +func (ma *MemcachedMolecule) Run() { + ma.Base.Run() + + moleculeTaskPath := filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", + fmt.Sprintf("%s_test.yml", strings.ToLower(ma.ctx.Kind))) + + log.Infof("insert molecule task to ensure that ConfigMap will be deleted") + err := kbutil.InsertCode(moleculeTaskPath, targetMoleculeCheckDeployment, molecuTaskToCheckConfigMap) + pkg.CheckError("replacing memcached task to add config map check", err) + + log.Infof("insert molecule task to ensure to check secret") + err = kbutil.InsertCode(moleculeTaskPath, memcachedCustomStatusMoleculeTarget, testSecretMoleculeCheck) + pkg.CheckError("replacing memcached task to add secret check", err) + + log.Infof("insert molecule task to ensure to foo ") + err = kbutil.InsertCode(moleculeTaskPath, testSecretMoleculeCheck, testFooMoleculeCheck) + pkg.CheckError("replacing memcached task to add foo check", err) + + log.Infof("insert molecule task to check custom metrics") + err = kbutil.InsertCode(moleculeTaskPath, testFooMoleculeCheck, customMetricsTest) + + pkg.CheckError("replacing memcached task to add foo check", err) + + log.Infof("replacing project Dockerfile to use ansible base image with the dev tag") + err = kbutil.ReplaceRegexInFile(filepath.Join(ma.ctx.Dir, "Dockerfile"), "quay.io/operator-framework/ansible-operator:.*", "quay.io/operator-framework/ansible-operator:dev") + pkg.CheckError("replacing Dockerfile", err) + + log.Infof("adding RBAC permissions") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, 
"config", "rbac", "role.yaml"), + "#+kubebuilder:scaffold:rules", rolesForBaseOperator) + pkg.CheckError("replacing in role.yml", err) + + log.Infof("adding Memcached mock task to the role with black list") + err = kbutil.InsertCode(filepath.Join(ma.ctx.Dir, "roles", strings.ToLower(ma.ctx.Kind), "tasks", "main.yml"), + roleFragment, memcachedWithBlackListTask) + pkg.CheckError("replacing in tasks/main.yml", err) + + log.Infof("creating an API definition Foo") + err = ma.ctx.CreateAPI( + "--group", ma.ctx.Group, + "--version", ma.ctx.Version, + "--kind", "Foo", + "--generate-role") + pkg.CheckError("creating api", err) + + log.Infof("updating spec of foo sample") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "samples", "cache_v1alpha1_foo.yaml"), + "# TODO(user): Add fields here", + "foo: bar") + pkg.CheckError("updating spec of cache_v1alpha1_foo.yaml", err) + + log.Infof("creating an API definition to add a task to delete the config map") + err = ma.ctx.CreateAPI( + "--group", ma.ctx.Group, + "--version", ma.ctx.Version, + "--kind", "Memfin", + "--generate-role") + pkg.CheckError("creating api", err) + + log.Infof("updating spec of Memfin sample") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "samples", "cache_v1alpha1_memfin.yaml"), + "# TODO(user): Add fields here", + "foo: bar") + pkg.CheckError("updating spec of cache_v1alpha1_memfin.yaml ", err) + + log.Infof("adding task to delete config map") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "roles", "memfin", "tasks", "main.yml"), + "# tasks file for Memfin", taskToDeleteConfigMap) + pkg.CheckError("replacing in tasks/main.yml", err) + + log.Infof("adding to watches finalizer and blacklist") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "watches.yaml"), + "playbook: playbooks/memcached.yml", memcachedWatchCustomizations) + pkg.CheckError("replacing in watches", err) + + log.Infof("enabling multigroup support") + err = 
ma.ctx.AllowProjectBeMultiGroup() + pkg.CheckError("updating PROJECT file", err) + + log.Infof("creating core Secret API") + err = ma.ctx.CreateAPI( + // the tool do not allow we crate an API with a group nil for v2+ + // which is required here to mock the tests. + // however, it is done already for v3+. More info: https://github.com/kubernetes-sigs/kubebuilder/issues/1404 + // and the tests should be changed when the tool allows we create API's for core types. + // todo: replace the ignore value when the tool provide a solution for it. + "--group", "ignore", + "--version", "v1", + "--kind", "Secret", + "--generate-role") + pkg.CheckError("creating api", err) + + log.Infof("updating spec of ignore sample") + err = kbutil.ReplaceInFile( + filepath.Join(ma.ctx.Dir, "config", "samples", "ignore_v1_secret.yaml"), + "# TODO(user): Add fields here", + "foo: bar") + pkg.CheckError("updating spec of ignore_v1_secret.yaml", err) + + log.Infof("removing ignore group for the secret from watches as an workaround to work with core types") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "watches.yaml"), + "ignore.example.com", "\"\"") + pkg.CheckError("replacing the watches file", err) + + log.Infof("removing molecule test for the Secret since it is a core type") + cmd := exec.Command("rm", "-rf", filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", "secret_test.yml")) + _, err = ma.ctx.Run(cmd) + pkg.CheckError("removing secret test file", err) + + log.Infof("adding Secret task to the role") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "roles", "secret", "tasks", "main.yml"), + originalTaskSecret, taskForSecret) + pkg.CheckError("replacing in secret/tasks/main.yml file", err) + + log.Infof("adding ManageStatus == false for role secret") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "watches.yaml"), + "role: secret", manageStatusFalseForRoleSecret) + pkg.CheckError("replacing in watches.yaml", err) + + // prevent high load of controller caused by 
watching all the secrets in the cluster + watchNamespacePatchFileName := "watch_namespace_patch.yaml" + log.Info("adding WATCH_NAMESPACE env patch to watch own namespace") + err = os.WriteFile(filepath.Join(ma.ctx.Dir, "config", "testing", watchNamespacePatchFileName), []byte(watchNamespacePatch), 0644) + pkg.CheckError("adding watch_namespace_patch.yaml", err) + + log.Info("adding WATCH_NAMESPACE env patch to patch list to be applied") + err = kbutil.InsertCode(filepath.Join(ma.ctx.Dir, "config", "testing", "kustomization.yaml"), "patchesStrategicMerge:", + fmt.Sprintf("\n- %s", watchNamespacePatchFileName)) + pkg.CheckError("inserting in kustomization.yaml", err) + + log.Infof("removing FIXME asserts from memfin_test.yml") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", "memfin_test.yml"), + fixmeAssert, "") + pkg.CheckError("replacing memfin_test.yml", err) + + log.Infof("removing FIXME asserts from foo_test.yml") + err = kbutil.ReplaceInFile(filepath.Join(ma.ctx.Dir, "molecule", "default", "tasks", "foo_test.yml"), + fixmeAssert, "") + pkg.CheckError("replacing foo_test.yml", err) +} diff --git a/hack/generate/samples/internal/ansible/testdata/ansible.cfg b/hack/generate/samples/internal/ansible/testdata/ansible.cfg new file mode 100644 index 0000000..4dcc9a9 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory_plugins = /opt/ansible/plugins/inventory +stdout_callback = yaml +callback_whitelist = profile_tasks,timer +module_utils = /opt/ansible/module_utils +roles_path = /opt/ansible/roles +library = /opt/ansible/library +inventory = /opt/ansible/inventory +filter_plugins = /opt/ansible/plugins/filter +remote_tmp = /tmp/ansible diff --git a/hack/generate/samples/internal/ansible/testdata/fixture_collection/galaxy.yml b/hack/generate/samples/internal/ansible/testdata/fixture_collection/galaxy.yml new file mode 100644 index 0000000..0f2699d --- /dev/null +++ 
b/hack/generate/samples/internal/ansible/testdata/fixture_collection/galaxy.yml @@ -0,0 +1,6 @@ +namespace: operator_sdk +name: test_fixtures +version: 0.0.0 +readme: README.md +authors: +- your name diff --git a/hack/generate/samples/internal/ansible/testdata/fixture_collection/roles/dummy/tasks/main.yml b/hack/generate/samples/internal/ansible/testdata/fixture_collection/roles/dummy/tasks/main.yml new file mode 100644 index 0000000..4543ce2 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/fixture_collection/roles/dummy/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Create ConfigMap + kubernetes.core.k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: test-this-collection + namespace: "{{ meta.namespace }}" + data: + did_it_work: "indeed" + state: present diff --git a/hack/generate/samples/internal/ansible/testdata/inventory/group_vars/test.yml b/hack/generate/samples/internal/ansible/testdata/inventory/group_vars/test.yml new file mode 100644 index 0000000..b89764e --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/inventory/group_vars/test.yml @@ -0,0 +1,3 @@ +--- + +sentinel: test diff --git a/hack/generate/samples/internal/ansible/testdata/inventory/hosts b/hack/generate/samples/internal/ansible/testdata/inventory/hosts new file mode 100644 index 0000000..fd2ec1a --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/inventory/hosts @@ -0,0 +1,5 @@ +[test] +127.0.0.1 ansible_connection=local + +[all:vars] +ansible_python_interpreter=/usr/bin/python3 \ No newline at end of file diff --git a/hack/generate/samples/internal/ansible/testdata/playbooks/finalizerconcurrencyfinalizer.yml b/hack/generate/samples/internal/ansible/testdata/playbooks/finalizerconcurrencyfinalizer.yml new file mode 100644 index 0000000..2cc57f3 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/playbooks/finalizerconcurrencyfinalizer.yml @@ -0,0 +1,34 @@ +--- +- hosts: localhost + gather_facts: no + 
collections: + - kubernetes.core + - operator_sdk.util + + tasks: + - debug: + msg: "Pausing until configmap exists" + + - name: Wait for configmap + k8s_info: + api_version: v1 + kind: ConfigMap + name: finalizer-concurrency-results + namespace: osdk-test + wait: yes + wait_sleep: 10 + wait_timeout: 30 + + - name: Update configmap + k8s: + state: present + force: yes + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: finalizer-concurrency-results + namespace: osdk-test + data: + finalizer: "success" + wait: yes diff --git a/hack/generate/samples/internal/ansible/testdata/plugins/filter/test.py b/hack/generate/samples/internal/ansible/testdata/plugins/filter/test.py new file mode 100644 index 0000000..2058f71 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/plugins/filter/test.py @@ -0,0 +1,17 @@ +#!/usr/bin/python3 + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def test(sentinel): + return sentinel == 'test' + + +class FilterModule(object): + ''' Fake test plugin for ansible-operator ''' + + def filters(self): + return { + 'test': test + } diff --git a/hack/generate/samples/internal/ansible/testdata/secret.yml b/hack/generate/samples/internal/ansible/testdata/secret.yml new file mode 100644 index 0000000..30374b8 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/secret.yml @@ -0,0 +1,22 @@ +--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + + tasks: + - meta: end_play + when: not (__secret.metadata.get('labels', {}).reconcile|default(false)|bool) + + # This is for testing, but never do this with real secrets + - name: Populate configmap with contents of secret + k8s: + definition: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: '{{ meta.name }}' + namespace: '{{ meta.namespace }}' + data: + '{{ item.key }}': '{{ item.value | b64decode }}' + with_dict: '{{ __secret.data }}' diff --git 
a/hack/generate/samples/internal/ansible/testdata/tasks/argstest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/argstest_test.yml new file mode 100644 index 0000000..180e1ff --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/argstest_test.yml @@ -0,0 +1,25 @@ +--- +- name: Create the test.example.com/v1alpha1.ArgsTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: ArgsTest + metadata: + name: args-test + namespace: '{{ namespace }}' + spec: + field: value + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + register: args_test + +- name: Assert sentinel ConfigMap has been created for Molecule Test + assert: + that: cm.data.msg == "The decrypted value is thisisatest" + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, + resource_name='args-test').0 }}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/casetest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/casetest_test.yml new file mode 100644 index 0000000..427a440 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/casetest_test.yml @@ -0,0 +1,24 @@ +--- +- name: Create the test.example.com/v1alpha1.CaseTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: CaseTest + metadata: + name: case-test + namespace: '{{ namespace }}' + spec: + camelCaseVar: "true" + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + register: case_test + +- name: Assert sentinel ConfigMap has been created for Molecule Test + assert: + that: cm.data.shouldBeCamel == 'true' + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, resource_name='case-test').0 }}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/clusterannotationtest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/clusterannotationtest_test.yml 
new file mode 100644 index 0000000..668c989 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/clusterannotationtest_test.yml @@ -0,0 +1,51 @@ +--- +- name: Create the test.example.com/v1alpha1.ClusterAnnotationTest + k8s: + state: present + namespace: '{{ namespace }}' + definition: "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}" + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + vars: + cr_file: 'test_v1alpha1_clusterannotationtest.yaml' + +- name: retrieve configmap + k8s_info: + api_version: v1 + kind: ConfigMap + namespace: "externalnamespace" + name: "clusterannotationtest-sample" + register: configmap + until: (configmap.resources | length) == 1 + +- assert: + that: + - configmap.resources[0].metadata.annotations["operator-sdk/primary-resource"] == primary + - configmap.resources[0].metadata.annotations["operator-sdk/primary-resource-type"] == primary_type + vars: + primary: "osdk-test/clusterannotationtest-sample" + primary_type: "ClusterAnnotationTest.test.example.com" + +- name: change the namespace labels + k8s: + name: "externalnamespace" + api_version: v1 + kind: "Namespace" + wait: yes + definition: + metadata: + labels: + foo: baz + +- name: Make sure the label is changed back + k8s_info: + api_version: v1 + kind: Namespace + name: "externalnamespace" + register: external_namespace + until: external_namespace.resources[0].metadata.labels["foo"] == "bar" + retries: 6 + diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/collectiontest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/collectiontest_test.yml new file mode 100644 index 0000000..9623d2a --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/collectiontest_test.yml @@ -0,0 +1,26 @@ +--- +- name: Create the test.example.com/v1alpha1.CollectionTest + k8s: + state: present + namespace: '{{ namespace }}' + definition: + apiVersion: test.example.com/v1alpha1 + 
kind: CollectionTest + metadata: + name: collection-test + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + +- name: Assert ConfigMap has been created by collection Role + assert: + that: cm.data.did_it_work == 'indeed' + vars: + cm: "{{ q('k8s', + api_version='v1', + kind='ConfigMap', + namespace=namespace, + resource_name='test-this-collection' + ).0 }}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/finalizerconcurrencytest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/finalizerconcurrencytest_test.yml new file mode 100644 index 0000000..8131669 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/finalizerconcurrencytest_test.yml @@ -0,0 +1,58 @@ +--- +# TODO(asmacdo) this should be the only task. the other is getting magiced in +- name: Create the test.example.com/v1alpha1.FinalizerConcurrencyTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: FinalizerConcurrencyTest + metadata: + name: finalizer-concurrency-test + namespace: '{{ namespace }}' + wait: no + +- name: While reconcile is paused, delete the CR + k8s: + state: absent + definition: + apiVersion: test.example.com/v1alpha1 + kind: FinalizerConcurrencyTest + metadata: + name: finalizer-concurrency-test + namespace: '{{ namespace }}' + wait: no + +- name: Create a configmap to allow reconciliation to unpause + k8s: + state: present + definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: finalizer-concurrency-results + namespace: osdk-test + wait: no + +- name: Wait for the custom resource to be deleted + k8s_info: + api_version: test.example.com/v1alpha1 + kind: FinalizerConcurrencyTest + namespace: osdk-test # TODO(asmacdo) Fixme + name: finalizer-concurrency-test + register: cr + retries: 10 + delay: 6 + until: not cr.resources + failed_when: cr.resources + +- name: Retrive the cm + k8s_info: + api_version: v1 + kind: ConfigMap + name: 
finalizer-concurrency-results + namespace: osdk-test + register: finalizer_test + +- name: Assert that finalizer ran + assert: + that: finalizer_test.resources.0.data.finalizer== 'success' diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/inventorytest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/inventorytest_test.yml new file mode 100644 index 0000000..649713e --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/inventorytest_test.yml @@ -0,0 +1,22 @@ +--- +- name: Create the test.example.com/v1alpha1.InventoryTest + k8s: + state: present + namespace: '{{ namespace }}' + definition: '{{ custom_resource }}' + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + vars: + custom_resource: "{{ lookup('template', '/'.join([ + config_dir, + 'samples/test_v1alpha1_inventorytest.yaml' + ])) | from_yaml }}" + +- name: Assert sentinel ConfigMap has been created for Molecule Test + assert: + that: cm.data.sentinel == 'test' + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, resource_name='inventory-cm').0 }}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/reconciliationtest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/reconciliationtest_test.yml new file mode 100644 index 0000000..b415b57 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/reconciliationtest_test.yml @@ -0,0 +1,32 @@ +--- +- name: Create the test.example.com/v1alpha1.ReconciliationTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: ReconciliationTest + metadata: + name: reconciliation-test + namespace: '{{ namespace }}' + spec: + field: value + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + register: reconciliation_test +- name: Retreive the number of iterations on the ConfigMap + debug: var=cm.data.iterations + retries: 20 + delay: 2 + until: 
"cm.data.iterations|int == 5" + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, + resource_name='reconciliation-test').0 }}" +- name: Assert sentinel ConfigMap has been created for Molecule Test + assert: + that: "cm.data.iterations|int == 5" + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, + resource_name='reconciliation-test').0 }}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/secretstest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/secretstest_test.yml new file mode 100644 index 0000000..70c1bc9 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/secretstest_test.yml @@ -0,0 +1,56 @@ +--- +- name: Create the v1.Secret + k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: test-secret + namespace: '{{ namespace }}' + labels: + reconcile: "yes" + data: + test: '{{ "test" | b64encode }}' + +- name: Wait for the corresponding configmap to be created + k8s_info: + api_version: v1 + kind: ConfigMap + name: test-secret + namespace: '{{ namespace }}' + register: result + until: result.resources + retries: 20 + +- name: Assert that the configmap has the proper content + assert: + that: result.resources.0.data.test == "test" + +- name: Update the v1.Secret + k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: test-secret + namespace: '{{ namespace }}' + labels: + reconcile: "yes" + data: + new: '{{ "content" | b64encode }}' + +- name: Wait for the corresponding key to be created + k8s_info: + api_version: v1 + kind: ConfigMap + name: test-secret + namespace: '{{ namespace }}' + register: result + until: result.resources.0.data.new is defined + retries: 20 + +- name: Assert that the configmap has the proper content + assert: + that: result.resources.0.data.new == 'content' diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/selectortest_test.yml 
b/hack/generate/samples/internal/ansible/testdata/tasks/selectortest_test.yml new file mode 100644 index 0000000..e766d87 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/selectortest_test.yml @@ -0,0 +1,51 @@ +--- +- name: Create the test.example.com/v1alpha1.SelectorTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: SelectorTest + metadata: + name: selector-test + namespace: '{{ namespace }}' + labels: + testLabel: testValue + spec: + field: value + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + register: selector_test + +- name: Assert sentinel ConfigMap has been created for Molecule Test + assert: + that: cm.data.hello == 'world' + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, + resource_name='selector-test').0 }}" + +- name: Create the test.example.com/v1alpha1.SelectorTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: SelectorTest + metadata: + name: selector-test-fail + namespace: '{{ namespace }}' + spec: + field: value + register: selector_test + +- name: Wait for 30 seconds + wait_for: + timeout: 30 + +- name: Assert sentinel ConfigMap has not been created for Molecule Test + assert: + that: not cm + vars: + cm: "{{ q('k8s', api_version='v1', kind='ConfigMap', namespace=namespace, + resource_name='selector-test-fail')}}" diff --git a/hack/generate/samples/internal/ansible/testdata/tasks/subresourcestest_test.yml b/hack/generate/samples/internal/ansible/testdata/tasks/subresourcestest_test.yml new file mode 100644 index 0000000..7a12b94 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/tasks/subresourcestest_test.yml @@ -0,0 +1,26 @@ +--- +- name: Create the test.example.com/v1alpha1.SubresourcesTest + k8s: + state: present + definition: + apiVersion: test.example.com/v1alpha1 + kind: SubresourcesTest + metadata: + name: subresources-test + namespace: '{{ 
namespace }}' + spec: + execCommand: "echo 'hello world'" + logMessage: "Running..." + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + register: subresources_test + +- name: Assert stdout and stderr are properly set in status + assert: + that: + - subresources_test.result.status.execCommandStderr == "" + - subresources_test.result.status.execCommandStdout == "hello world" + - "'Running' in subresources_test.result.status.logs" diff --git a/hack/generate/samples/internal/ansible/testdata/watches.yaml b/hack/generate/samples/internal/ansible/testdata/watches.yaml new file mode 100644 index 0000000..1adfbd9 --- /dev/null +++ b/hack/generate/samples/internal/ansible/testdata/watches.yaml @@ -0,0 +1,84 @@ +--- +# Use the 'create api' subcommand to add watches to this file. +- version: v1alpha1 + group: test.example.com + kind: InventoryTest + playbook: playbooks/inventorytest.yml + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: CollectionTest + role: operator_sdk.test_fixtures.dummy + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: SubresourcesTest + playbook: playbooks/subresourcestest.yml + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1 + group: "" + kind: Secret + playbook: playbooks/secret.yml + manageStatus: false + selector: + matchExpressions: + - {key: reconcile, operator: Exists, values: []} + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: SelectorTest + playbook: playbooks/selectortest.yml + selector: + matchExpressions: + - {key: testLabel, operator: Exists, values: []} + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: CaseTest + playbook: playbooks/casetest.yml + snakeCaseParameters: false + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: 
ArgsTest + playbook: playbooks/argstest.yml + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: ReconciliationTest + playbook: playbooks/reconciliationtest.yml + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: ClusterAnnotationTest + playbook: playbooks/clusterannotationtest.yml + watchClusterScopedResources: true + vars: + meta: '{{ ansible_operator_meta }}' + +- version: v1alpha1 + group: test.example.com + kind: FinalizerConcurrencyTest + playbook: playbooks/finalizerconcurrencytest.yml + finalizer: + name: test.example.com/finalizer + playbook: playbooks/finalizerconcurrencyfinalizer.yml + vars: + meta: '{{ ansible_operator_meta }}' +#+kubebuilder:scaffold:watch diff --git a/hack/generate/samples/internal/pkg/context.go b/hack/generate/samples/internal/pkg/context.go new file mode 100644 index 0000000..7df4d00 --- /dev/null +++ b/hack/generate/samples/internal/pkg/context.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +import ( + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" +) + +// SampleContext represents the Context used to generate the samples +type SampleContext struct { + testutils.TestContext +} + +// NewSampleContext returns a SampleContext containing a new kubebuilder TestContext. 
+func NewSampleContext(binary string, path string, env ...string) (s SampleContext, err error) { + s.TestContext, err = testutils.NewPartialTestContext(binary, path, env...) + return s, err +} diff --git a/hack/generate/samples/internal/pkg/utils.go b/hack/generate/samples/internal/pkg/utils.go new file mode 100644 index 0000000..a1a1684 --- /dev/null +++ b/hack/generate/samples/internal/pkg/utils.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pkg + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + + log "github.com/sirupsen/logrus" + + "github.com/operator-framework/ansible-operator-plugins/internal/annotations/metrics" +) + +// CheckError will exit with exit code 1 when err is not nil. +func CheckError(msg string, err error) { + if err != nil { + log.Errorf("error %s: %s", msg, err) + os.Exit(1) + } +} + +// StripBundleAnnotations removes all annotations applied to bundle manifests and metadata +// by operator-sdk/internal/annotations/metrics annotators. Doing so decouples samples +// from which operator-sdk version they were build with, as this information is already +// available in git history. +func (ctx SampleContext) StripBundleAnnotations() (err error) { + // Remove metadata labels. 
+ metadataAnnotations := metrics.MakeBundleMetadataLabels("") + metadataFiles := []string{ + filepath.Join(ctx.Dir, "bundle", "metadata", "annotations.yaml"), + filepath.Join(ctx.Dir, "bundle.Dockerfile"), + } + if err = removeAllAnnotationLines(metadataAnnotations, metadataFiles); err != nil { + return err + } + + // Remove manifests annotations. + manifestsAnnotations := metrics.MakeBundleObjectAnnotations("") + manifestsFiles := []string{ + filepath.Join(ctx.Dir, "bundle", "manifests", ctx.ProjectName+".clusterserviceversion.yaml"), + filepath.Join(ctx.Dir, "config", "manifests", "bases", ctx.ProjectName+".clusterserviceversion.yaml"), + } + if err = removeAllAnnotationLines(manifestsAnnotations, manifestsFiles); err != nil { + return err + } + + return nil +} + +// removeAllAnnotationLines removes each line containing a key in annotations from all files at filePaths. +func removeAllAnnotationLines(annotations map[string]string, filePaths []string) error { + var annotationREs []*regexp.Regexp + for annotation := range annotations { + re, err := regexp.Compile(".+" + regexp.QuoteMeta(annotation) + ".+\n") + if err != nil { + return fmt.Errorf("compiling annotation regexp: %v", err) + } + annotationREs = append(annotationREs, re) + } + + for _, file := range filePaths { + b, err := os.ReadFile(file) + if err != nil { + return err + } + for _, re := range annotationREs { + b = re.ReplaceAll(b, []byte{}) + } + err = os.WriteFile(file, b, 0644) + if err != nil { + return err + } + } + return nil +} diff --git a/hack/generate/samples/molecule/generate.go b/hack/generate/samples/molecule/generate.go new file mode 100644 index 0000000..d9406d6 --- /dev/null +++ b/hack/generate/samples/molecule/generate.go @@ -0,0 +1,78 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "os" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/operator-framework/ansible-operator-plugins/hack/generate/samples/internal/ansible" + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" +) + +// This generate is used to run the e2e molecule tests +func main() { + var ( + // binaryPath allow inform the binary that should be used. + // By default it is operator-sdk + binaryPath string + + // samplesRoot is the path provided to generate the molecule sample + samplesRoot string + + // sample is the name of the mock was selected to be generated + sample string + ) + + flag.StringVar(&binaryPath, "bin", testutils.BinaryName, "Binary path that should be used") + flag.StringVar(&samplesRoot, "samples-root", "", "Path where molecule samples should be generated") + flag.StringVar(&sample, "sample", "", "To generate only the selected option. Options: [advanced, memcached]") + + flag.Parse() + + // Make the binary path absolute if pathed, for reproducibility and debugging purposes. 
+ if dir, _ := filepath.Split(binaryPath); dir != "" { + tmp, err := filepath.Abs(binaryPath) + if err != nil { + log.Fatalf("Failed to make binary path %q absolute: %v", binaryPath, err) + } + binaryPath = tmp + } + + // If no path be provided then the Molecule sample will be create in the testdata/ansible dir + // It can be helpful to check the mock data used in the e2e molecule tests as to develop this sample + // By default this mock is ignored in the .gitignore + if strings.TrimSpace(samplesRoot) == "" { + currentPath, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + samplesRoot = filepath.Join(currentPath, "testdata", "ansible") + } + + log.Infof("creating Ansible Molecule Mock Samples under %s", samplesRoot) + + if sample == "" || sample == "memcached" { + ansible.GenerateMoleculeSample(binaryPath, samplesRoot) + } + + if sample == "" || sample == "advanced" { + ansible.GenerateAdvancedMoleculeSample(binaryPath, samplesRoot) + } +} diff --git a/hack/lib/common.sh b/hack/lib/common.sh new file mode 100644 index 0000000..3a4f899 --- /dev/null +++ b/hack/lib/common.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# Turn colors in this script off by setting the NO_COLOR variable in your +# environment to any value: +NO_COLOR=${NO_COLOR:-""} +if [ -z "$NO_COLOR" ]; then + header_color=$'\e[1;33m' + error_color=$'\e[0;31m' + reset_color=$'\e[0m' +else + header_color='' + error_color='' + reset_color='' +fi + +function log() { printf '%s\n' "$*"; } +function error() { error_text "ERROR:" $* >&2; } +function fatal() { error "$@"; exit 1; } + +function header_text { + echo "$header_color$*$reset_color" +} + +function error_text { + echo "$error_color$*$reset_color" +} + +#=================================================================== +# FUNCTION trap_add () +# +# Purpose: prepends a command to a trap +# +# - 1st arg: code to add +# - remaining args: names of traps to modify +# +# Example: trap_add 'echo "in trap DEBUG"' DEBUG +# +# See: 
http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +#=================================================================== +function trap_add() { + trap_add_cmd=$1; shift || fatal "${FUNCNAME} usage error" + new_cmd= + for trap_add_name in "$@"; do + # Grab the currently defined trap commands for this trap + existing_cmd=`trap -p "${trap_add_name}" | awk -F"'" '{print $2}'` + + # Define default command + [ -z "${existing_cmd}" ] && existing_cmd="echo exiting @ `date`" + + # Generate the new command + new_cmd="${trap_add_cmd};${existing_cmd}" + + # Assign the test + trap "${new_cmd}" "${trap_add_name}" || \ + fatal "unable to add to trap ${trap_add_name}" + done +} + +function listPkgDirs() { + go list -f '{{.Dir}}' ./cmd/... ./test/... ./internal/... | grep -v generated +} + +function listFiles() { + # pipeline is much faster than for loop + listPkgDirs | xargs -I {} find {} -name '*.go' | grep -v generated +} diff --git a/hack/tests/e2e-ansible-molecule.sh b/hack/tests/e2e-ansible-molecule.sh new file mode 100755 index 0000000..8c14101 --- /dev/null +++ b/hack/tests/e2e-ansible-molecule.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash + +source hack/lib/common.sh + +# load_image_if_kind +# +# load_image_if_kind loads an image into all nodes in a kind cluster. +# +function load_image_if_kind() { + local cluster=${KIND_CLUSTER:-kind} + if [[ "$(kubectl config current-context)" == "kind-${cluster}" ]]; then + kind load docker-image --name "${cluster}" "$1" + fi +} + +set -eu + +header_text "Running ansible molecule tests in a python3 virtual environment" + +# Set up a python3.8 virtual environment. +ENVDIR="$(mktemp -d)" +trap_add "set +u; deactivate; set -u; rm -rf $ENVDIR" EXIT +python3 -m venv "$ENVDIR" +set +u; source "${ENVDIR}/bin/activate"; set -u + +# Install dependencies. 
+TMPDIR="$(mktemp -d)" +trap_add "rm -rf $TMPDIR" EXIT +pip3 install pyasn1==0.4.7 pyasn1-modules==0.2.6 idna==2.8 ipaddress==1.0.23 +pip3 install cryptography molecule==5.1.0 +pip3 install ansible-lint yamllint +pip3 install docker kubernetes jmespath +ansible-galaxy collection install 'kubernetes.core:==2.4.0' +ansible-galaxy collection install 'operator_sdk.util:==0.4.0' +ansible-galaxy collection install 'community.docker:==3.4.0' + +header_text "Copying molecule testdata scenarios" +ROOTDIR="$(pwd)" +cp -r $ROOTDIR/testdata/ansible/memcached-molecule-operator/ $TMPDIR/memcached-molecule-operator +cp -r $ROOTDIR/testdata/ansible/advanced-molecule-operator/ $TMPDIR/advanced-molecule-operator + +pushd $TMPDIR/memcached-molecule-operator + +header_text "Running Kind test with memcached-molecule-operator" +make kustomize +if [ -f ./bin/kustomize ] ; then + KUSTOMIZE="$(realpath ./bin/kustomize)" +else + KUSTOMIZE="$(which kustomize)" +fi +KUSTOMIZE_PATH=${KUSTOMIZE} TEST_OPERATOR_NAMESPACE=default molecule test -s kind +popd + +header_text "Running Default test with advanced-molecule-operator" + +make test-e2e-setup +pushd $TMPDIR/advanced-molecule-operator + +make kustomize +if [ -f ./bin/kustomize ] ; then + KUSTOMIZE="$(realpath ./bin/kustomize)" +else + KUSTOMIZE="$(which kustomize)" +fi + +DEST_IMAGE="quay.io/example/advanced-molecule-operator:v0.0.1" +docker build -t "$DEST_IMAGE" --no-cache . 
+load_image_if_kind "$DEST_IMAGE" +KUSTOMIZE_PATH=$KUSTOMIZE OPERATOR_PULL_POLICY=Never OPERATOR_IMAGE=${DEST_IMAGE} TEST_OPERATOR_NAMESPACE=osdk-test molecule test +popd diff --git a/images/ansible-operator/Dockerfile b/images/ansible-operator/Dockerfile new file mode 100644 index 0000000..430157a --- /dev/null +++ b/images/ansible-operator/Dockerfile @@ -0,0 +1,37 @@ +# Build the manager binary +FROM --platform=$BUILDPLATFORM golang:1.19 as builder +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY . . + +# Build +RUN GOOS=linux GOARCH=$TARGETARCH make build/ansible-operator + +# Final image. +FROM quay.io/operator-framework/ansible-operator-base:master-50c6ac03746ff4edf582feb9a71d2a7ea6ae6c40 + +ENV HOME=/opt/ansible \ + USER_NAME=ansible \ + USER_UID=1001 + +# Ensure directory permissions are properly set +RUN echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd \ + && mkdir -p ${HOME}/.ansible/tmp \ + && chown -R ${USER_UID}:0 ${HOME} \ + && chmod -R ug+rwx ${HOME} + +WORKDIR ${HOME} +USER ${USER_UID} + +COPY --from=builder /workspace/build/ansible-operator /usr/local/bin/ansible-operator + +ENTRYPOINT ["/tini", "--", "/usr/local/bin/ansible-operator", "run", "--watches-file=./watches.yaml"] diff --git a/images/ansible-operator/Pipfile b/images/ansible-operator/Pipfile new file mode 100644 index 0000000..9d1d254 --- /dev/null +++ b/images/ansible-operator/Pipfile @@ -0,0 +1,16 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +ansible-runner = "~=2.3.3" +ansible-runner-http = "~=1.0.0" +ansible-core = "~=2.15.0" +urllib3 = "<2" +kubernetes = "==26.1.0" + +[dev-packages] + +[requires] +python_version = "3.9" 
diff --git a/images/ansible-operator/Pipfile.lock b/images/ansible-operator/Pipfile.lock new file mode 100644 index 0000000..551c1ff --- /dev/null +++ b/images/ansible-operator/Pipfile.lock @@ -0,0 +1,549 @@ +{ + "_meta": { + "hash": { + "sha256": "f38af862999f7404c088a58040eb5b5f57078c9da07ab3dd5c1c9e4cded529b1" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.9" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "ansible-core": { + "hashes": [ + "sha256:2926b1dc96ef69272195c91a1fedfe30b9aacd674cb04628b26626ecb0c8524a", + "sha256:ed28eb4943e480004edc9ba1e9bee979a0650cb2f9372ce4a2bd572838c60d2b" + ], + "index": "pypi", + "version": "==2.15.1" + }, + "ansible-runner": { + "hashes": [ + "sha256:38ff635e4b94791de2956c81e265836ec4965b30e9ee35d72fcf3271dc46b98b", + "sha256:c57ae0d096760d66b2897b0f9009856c7b83fd5428dcb831f470cba348346396" + ], + "index": "pypi", + "version": "==2.3.3" + }, + "ansible-runner-http": { + "hashes": [ + "sha256:97da445b7d5c6663b0cceaf6bd5e9b0b0dff9a4c36eae43c8c916c6208aee915", + "sha256:e2f34880531d4088a5e04967fd5eae602eb400cc4eb541b22c8c6853e342587f" + ], + "index": "pypi", + "version": "==1.0.0" + }, + "cachetools": { + "hashes": [ + "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590", + "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b" + ], + "markers": "python_version >= '3.7'", + "version": "==5.3.1" + }, + "certifi": { + "hashes": [ + "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7", + "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716" + ], + "markers": "python_version >= '3.6'", + "version": "==2023.5.7" + }, + "cffi": { + "hashes": [ + "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", + "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", + 
"sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", + "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", + "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", + "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", + "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", + "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", + "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", + "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", + "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", + "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", + "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", + "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", + "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", + "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", + "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", + "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", + "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", + "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", + "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", + "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", + "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", + "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", + "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", + "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", + "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", + "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", 
+ "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", + "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", + "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", + "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", + "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", + "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", + "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", + "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", + "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", + "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", + "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", + "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", + "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", + "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", + "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", + "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", + "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", + "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", + "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", + "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", + "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", + "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", + "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", + "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", + "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", + 
"sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", + "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", + "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", + "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", + "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", + "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", + "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", + "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", + "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", + "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", + "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" + ], + "version": "==1.15.1" + }, + "charset-normalizer": { + "hashes": [ + "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6", + "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1", + "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e", + "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373", + "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62", + "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230", + "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be", + "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c", + "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0", + "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448", + "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f", + "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649", + "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d", + "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0", + 
"sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706", + "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a", + "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59", + "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23", + "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5", + "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb", + "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e", + "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e", + "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c", + "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28", + "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d", + "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41", + "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974", + "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce", + "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f", + "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1", + "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d", + "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8", + "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017", + "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31", + "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7", + "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8", + "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e", + "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14", + "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd", + "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d", 
+ "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795", + "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b", + "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b", + "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b", + "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203", + "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f", + "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19", + "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1", + "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a", + "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac", + "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9", + "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0", + "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137", + "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f", + "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6", + "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5", + "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909", + "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f", + "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0", + "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324", + "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755", + "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb", + "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854", + "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c", + "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60", + 
"sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84", + "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0", + "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b", + "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1", + "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531", + "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1", + "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11", + "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326", + "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df", + "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.1.0" + }, + "cryptography": { + "hashes": [ + "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db", + "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a", + "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039", + "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c", + "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3", + "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485", + "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c", + "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca", + "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5", + "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5", + "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3", + "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb", + "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43", + "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31", + 
"sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc", + "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b", + "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006", + "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a", + "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699" + ], + "markers": "python_version >= '3.7'", + "version": "==41.0.1" + }, + "docutils": { + "hashes": [ + "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6", + "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b" + ], + "markers": "python_version >= '3.7'", + "version": "==0.20.1" + }, + "google-auth": { + "hashes": [ + "sha256:b28e8048e57727e7cf0e5bd8e7276b212aef476654a09511354aa82753b45c66", + "sha256:da3f18d074fa0f5a7061d99b9af8cee3aa6189c987af7c1b07d94566b6b11268" + ], + "markers": "python_version >= '3.6'", + "version": "==2.21.0" + }, + "idna": { + "hashes": [ + "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4", + "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2" + ], + "markers": "python_version >= '3.5'", + "version": "==3.4" + }, + "importlib-metadata": { + "hashes": [ + "sha256:5a66966b39ff1c14ef5b2d60c1d842b0141fefff0f4cc6365b4bc9446c652807", + "sha256:f65e478a7c2177bd19517a3a15dac094d253446d8690c5f3e71e735a04312374" + ], + "markers": "python_version < '3.10'", + "version": "==6.2.1" + }, + "importlib-resources": { + "hashes": [ + "sha256:2238159eb743bd85304a16e0536048b3e991c531d1cd51c4a834d1ccf2829057", + "sha256:4df460394562b4581bb4e4087ad9447bd433148fba44241754ec3152499f1d1b" + ], + "markers": "python_version < '3.10'", + "version": "==5.0.7" + }, + "jinja2": { + "hashes": [ + "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", + "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + ], + "markers": "python_version >= '3.7'", + "version": 
"==3.1.2" + }, + "kubernetes": { + "hashes": [ + "sha256:5854b0c508e8d217ca205591384ab58389abdae608576f9c9afc35a3c76a366c", + "sha256:e3db6800abf7e36c38d2629b5cb6b74d10988ee0cba6fba45595a7cbe60c0042" + ], + "index": "pypi", + "version": "==26.1.0" + }, + "lockfile": { + "hashes": [ + "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799", + "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa" + ], + "version": "==0.12.2" + }, + "markupsafe": { + "hashes": [ + "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", + "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", + "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", + "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", + "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", + "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", + "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", + "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", + "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", + "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", + "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", + "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", + "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", + "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", + "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", + "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", + "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", + "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", + "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", + 
"sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", + "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", + "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", + "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", + "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", + "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", + "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", + "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", + "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", + "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", + "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", + "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", + "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", + "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", + "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", + "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", + "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", + "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", + "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", + "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", + "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", + "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", + "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", + "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", + "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", + "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", 
+ "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", + "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", + "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", + "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", + "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.3" + }, + "oauthlib": { + "hashes": [ + "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", + "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918" + ], + "markers": "python_version >= '3.6'", + "version": "==3.2.2" + }, + "packaging": { + "hashes": [ + "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", + "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + ], + "markers": "python_version >= '3.7'", + "version": "==23.1" + }, + "pexpect": { + "hashes": [ + "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937", + "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c" + ], + "version": "==4.8.0" + }, + "ptyprocess": { + "hashes": [ + "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", + "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220" + ], + "version": "==0.7.0" + }, + "pyasn1": { + "hashes": [ + "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", + "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "version": "==0.5.0" + }, + "pyasn1-modules": { + "hashes": [ + "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c", + "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 
3.5'", + "version": "==0.3.0" + }, + "pycparser": { + "hashes": [ + "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", + "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.21" + }, + "python-daemon": { + "hashes": [ + "sha256:42bb848a3260a027fa71ad47ecd959e471327cb34da5965962edd5926229f341", + "sha256:6c57452372f7eaff40934a1c03ad1826bf5e793558e87fef49131e6464b4dae5" + ], + "markers": "python_version >= '3'", + "version": "==3.0.1" + }, + "python-dateutil": { + "hashes": [ + "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", + "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.8.2" + }, + "pyyaml": { + "hashes": [ + "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf", + "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293", + "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b", + "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57", + "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b", + "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4", + "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07", + "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba", + "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9", + "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287", + "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513", + "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0", + "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782", + 
"sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0", + "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92", + "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f", + "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2", + "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc", + "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1", + "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c", + "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86", + "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4", + "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c", + "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34", + "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b", + "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d", + "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c", + "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb", + "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7", + "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737", + "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3", + "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d", + "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358", + "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53", + "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78", + "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803", + "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a", + "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f", + "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174", 
+ "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5" + ], + "markers": "python_version >= '3.6'", + "version": "==6.0" + }, + "requests": { + "hashes": [ + "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", + "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + ], + "markers": "python_version >= '3.7'", + "version": "==2.31.0" + }, + "requests-oauthlib": { + "hashes": [ + "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5", + "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.3.1" + }, + "requests-unixsocket": { + "hashes": [ + "sha256:28304283ea9357d45fff58ad5b11e47708cfbf5806817aa59b2a363228ee971e", + "sha256:c685c680f0809e1b2955339b1e5afc3c0022b3066f4f7eb343f43a6065fc0e5d" + ], + "version": "==0.3.0" + }, + "resolvelib": { + "hashes": [ + "sha256:04ce76cbd63fded2078ce224785da6ecd42b9564b1390793f64ddecbe997b309", + "sha256:d2da45d1a8dfee81bdd591647783e340ef3bcb104b54c383f70d422ef5cc7dbf" + ], + "version": "==1.0.1" + }, + "rsa": { + "hashes": [ + "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", + "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21" + ], + "markers": "python_version >= '3.6' and python_version < '4'", + "version": "==4.9" + }, + "setuptools": { + "hashes": [ + "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f", + "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235" + ], + "markers": "python_version >= '3.7'", + "version": "==68.0.0" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.16.0" + }, + 
"urllib3": { + "hashes": [ + "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f", + "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14" + ], + "index": "pypi", + "version": "==1.26.16" + }, + "websocket-client": { + "hashes": [ + "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd", + "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d" + ], + "markers": "python_version >= '3.7'", + "version": "==1.6.1" + }, + "zipp": { + "hashes": [ + "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b", + "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556" + ], + "markers": "python_version >= '3.7'", + "version": "==3.15.0" + } + }, + "develop": {} +} diff --git a/images/ansible-operator/base.Dockerfile b/images/ansible-operator/base.Dockerfile new file mode 100644 index 0000000..679ee8d --- /dev/null +++ b/images/ansible-operator/base.Dockerfile @@ -0,0 +1,60 @@ +# This Dockerfile defines the base image for the ansible-operator image. +# It is built with dependencies that take a while to download, thus speeding +# up ansible deploy jobs. + +FROM registry.access.redhat.com/ubi8/ubi:8.8 AS builder + +# Install Rust so that we can ensure backwards compatibility with installing/building the cryptography wheel across all platforms +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +ENV PATH="/root/.cargo/bin:${PATH}" +RUN rustc --version + +# Copy python dependencies (including ansible) to be installed using Pipenv +COPY Pipfile* ./ +# Instruct pip(env) not to keep a cache of installed packages, +# to install into the global site-packages and +# to clear the pipenv cache as well +ENV PIP_NO_CACHE_DIR=1 \ + PIPENV_SYSTEM=1 \ + PIPENV_CLEAR=1 +# Ensure fresh metadata rather than cached metadata, install system and pip python deps, +# and remove those not needed at runtime. 
+# pip3~=21.1 fixes a vulnerability described in https://github.com/pypa/pip/pull/9827. +RUN set -e && yum clean all && rm -rf /var/cache/yum/* \ + && yum update -y \ + && yum install -y libffi-devel openssl-devel python39-devel gcc python39-pip python39-setuptools \ + && pip3 install --upgrade pip~=23.1.2 \ + && pip3 install pipenv==2023.6.26 \ + && pipenv install --deploy \ + && pipenv check \ + && yum remove -y gcc libffi-devel openssl-devel python39-devel \ + && yum clean all \ + && rm -rf /var/cache/yum + +FROM registry.access.redhat.com/ubi8/ubi:8.8 +ARG TARGETARCH + +# Label this image with the repo and commit that built it, for freshmaking purposes. +ARG GIT_COMMIT=devel +LABEL git_commit=$GIT_COMMIT + +RUN mkdir -p /etc/ansible \ + && echo "localhost ansible_connection=local" > /etc/ansible/hosts \ + && echo '[defaults]' > /etc/ansible/ansible.cfg \ + && echo 'roles_path = /opt/ansible/roles' >> /etc/ansible/ansible.cfg \ + && echo 'library = /usr/share/ansible/openshift' >> /etc/ansible/ansible.cfg + +RUN set -e && yum clean all && rm -rf /var/cache/yum/* \ + && yum update -y \ + && yum install -y python39-pip python39-setuptools \ + && pip3 install --upgrade pip~=23.1.2 \ + && pip3 install pipenv==2023.6.26 \ + && yum clean all \ + && rm -rf /var/cache/yum + +COPY --from=builder /usr/local/lib64/python3.9/site-packages /usr/local/lib64/python3.9/site-packages +COPY --from=builder /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages + +ENV TINI_VERSION=v0.19.0 +RUN curl -L -o /tini https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${TARGETARCH} \ + && chmod +x /tini && /tini --version \ No newline at end of file diff --git a/internal/annotations/metrics/metrics.go b/internal/annotations/metrics/metrics.go new file mode 100644 index 0000000..b0b32b1 --- /dev/null +++ b/internal/annotations/metrics/metrics.go @@ -0,0 +1,85 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "regexp" + "strings" + + sdkversion "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +// Static bundle annotation values. +const ( + mediaTypeV1 = "metrics+v1" +) + +// Bundle annotation keys. +const ( + mediaTypeBundleAnnotation = "operators.operatorframework.io.metrics.mediatype.v1" + builderBundleAnnotation = "operators.operatorframework.io.metrics.builder" + layoutBundleAnnotation = "operators.operatorframework.io.metrics.project_layout" +) + +// Object annotation keys. +const ( + BuilderObjectAnnotation = "operators.operatorframework.io/builder" + LayoutObjectAnnotation = "operators.operatorframework.io/project_layout" +) + +// MakeBundleMetadataLabels returns the SDK metric labels which will be added +// to bundle resources like bundle.Dockerfile and annotations.yaml. +func MakeBundleMetadataLabels(layout string) map[string]string { + return map[string]string{ + mediaTypeBundleAnnotation: mediaTypeV1, + builderBundleAnnotation: getSDKBuilder(sdkversion.Version), + layoutBundleAnnotation: layout, + } +} + +// MakeBundleObjectAnnotations returns the SDK metric annotations which will be added +// to CustomResourceDefinitions and ClusterServiceVersions. 
+func MakeBundleObjectAnnotations(layout string) map[string]string { + return map[string]string{ + BuilderObjectAnnotation: getSDKBuilder(sdkversion.Version), + LayoutObjectAnnotation: layout, + } +} + +func getSDKBuilder(rawSDKVersion string) string { + return "operator-sdk" + "-" + parseVersion(rawSDKVersion) +} + +func parseVersion(input string) string { + re := regexp.MustCompile(`v[0-9]+\.[0-9]+\.[0-9]+`) + version := re.FindString(input) + if version == "" { + return "unknown" + } + + if isUnreleased(input) { + version = version + "+git" + } + return version +} + +// isUnreleased returns true if sdk was not built from released version. +func isUnreleased(input string) bool { + if strings.Contains(input, "+git") { + return true + } + re := regexp.MustCompile(`v[0-9]+\.[0-9]+\.[0-9]+-.+`) + return re.MatchString(input) +} diff --git a/internal/annotations/metrics/metrics_suite_test.go b/internal/annotations/metrics/metrics_suite_test.go new file mode 100644 index 0000000..dbbf40f --- /dev/null +++ b/internal/annotations/metrics/metrics_suite_test.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestMetrics(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Metrics Suite") +} diff --git a/internal/annotations/metrics/metrics_test.go b/internal/annotations/metrics/metrics_test.go new file mode 100644 index 0000000..eeeb7c0 --- /dev/null +++ b/internal/annotations/metrics/metrics_test.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("SDK Label helper functions", func() { + Describe("parseVersion", func() { + It("should extract sdk version", func() { + version := "v0.17.0-159-ge87627f4-dirty" + output := parseVersion(version) + Expect(output).To(Equal("v0.17.0+git")) + }) + It("should extract sdk version", func() { + version := "v0.18.0" + output := parseVersion(version) + Expect(output).To(Equal("v0.18.0")) + }) + It("should extract sdk version", func() { + version := "v0.18.0-ge87627f4" + output := parseVersion(version) + Expect(output).To(Equal("v0.18.0+git")) + }) + It("should return unknown", func() { + version := "noneSemanticVersion" + output := parseVersion(version) + Expect(output).To(Equal("unknown")) + }) + }) +}) diff --git a/internal/annotations/scorecard/scorecard.go b/internal/annotations/scorecard/scorecard.go new file mode 100644 index 0000000..e1a6262 --- /dev/null +++ b/internal/annotations/scorecard/scorecard.go @@ -0,0 +1,54 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scorecard + +import ( + "path/filepath" +) + +// Static bundle annotation values. +const ( + mediaTypeV1 = "scorecard+v1" +) + +// Bundle annotation keys. +// NB(estroz): version these keys based on their "vX" version (either with the version in their names, +// or in subpackages). This may be a requirement if we create "v2" keys. 
+const ( + mediaTypeBundleKey = "operators.operatorframework.io.test.mediatype.v1" + configBundleKey = "operators.operatorframework.io.test.config.v1" +) + +func MakeBundleMetadataLabels(configDir string) map[string]string { + return map[string]string{ + mediaTypeBundleKey: mediaTypeV1, + configBundleKey: configDir, + } +} + +func GetConfigDir(labels map[string]string) (value string, hasKey bool) { + if configKey, hasMTKey := configKeyForMediaType(labels); hasMTKey { + value, hasKey = labels[configKey] + } + return filepath.Clean(filepath.FromSlash(value)), hasKey +} + +func configKeyForMediaType(labels map[string]string) (string, bool) { + switch labels[mediaTypeBundleKey] { + case mediaTypeV1: + return configBundleKey, true + } + return "", false +} diff --git a/internal/ansible/apiserver/apiserver.go b/internal/ansible/apiserver/apiserver.go new file mode 100644 index 0000000..dca20ab --- /dev/null +++ b/internal/ansible/apiserver/apiserver.go @@ -0,0 +1,77 @@ +// Copyright 2022 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apiserver + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/metrics" +) + +var log = logf.Log.WithName("apiserver") + +type Options struct { + Address string + Port int +} + +func Run(options Options) error { + mux := http.NewServeMux() + mux.HandleFunc("/metrics", metricsHandler) + + server := http.Server{ + Addr: fmt.Sprintf("%s:%d", options.Address, options.Port), + Handler: mux, + ReadHeaderTimeout: 5 * time.Second, + } + log.Info("Starting to serve metrics listener", "Address", server.Addr) + return server.ListenAndServe() +} + +func metricsHandler(w http.ResponseWriter, r *http.Request) { + defer func() { + _, _ = io.Copy(io.Discard, r.Body) + r.Body.Close() + }() + log.V(3).Info(fmt.Sprintf("%s %s", r.Method, r.URL)) + + var userMetric metrics.UserMetric + + switch r.Method { + case http.MethodPost: + log.V(3).Info("The apiserver has received a POST") + err := json.NewDecoder(r.Body).Decode(&userMetric) + if err != nil { + log.Info(err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = metrics.HandleUserMetric(crmetrics.Registry, userMetric) + if err != nil { + log.Info(err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + } + default: + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + +} diff --git a/internal/ansible/controller/controller.go b/internal/ansible/controller/controller.go new file mode 100644 index 0000000..a6e5fdd --- /dev/null +++ b/internal/ansible/controller/controller.go @@ -0,0 +1,149 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "fmt" + "os" + "reflect" + "strings" + "time" + + libpredicate "github.com/operator-framework/operator-lib/predicate" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + ctrlpredicate "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/events" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/handler" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner" +) + +var log = logf.Log.WithName("ansible-controller") + +// Options - options for your controller +type Options struct { + EventHandlers []events.EventHandler + LoggingLevel events.LogLevel + Runner runner.Runner + GVK schema.GroupVersionKind + ReconcilePeriod time.Duration + ManageStatus bool + AnsibleDebugLogs bool + WatchDependentResources bool + WatchClusterScopedResources bool + WatchAnnotationsChanges bool + MaxConcurrentReconciles int + Selector metav1.LabelSelector +} + +// Add - Creates a new ansible operator controller and adds it to the manager +func Add(mgr manager.Manager, options Options) *controller.Controller { + log.Info("Watching resource", "Options.Group", options.GVK.Group, "Options.Version", + 
options.GVK.Version, "Options.Kind", options.GVK.Kind) + if options.EventHandlers == nil { + options.EventHandlers = []events.EventHandler{} + } + eventHandlers := append(options.EventHandlers, events.NewLoggingEventHandler(options.LoggingLevel)) + + aor := &AnsibleOperatorReconciler{ + Client: mgr.GetClient(), + GVK: options.GVK, + Runner: options.Runner, + EventHandlers: eventHandlers, + ReconcilePeriod: options.ReconcilePeriod, + ManageStatus: options.ManageStatus, + AnsibleDebugLogs: options.AnsibleDebugLogs, + APIReader: mgr.GetAPIReader(), + WatchAnnotationsChanges: options.WatchAnnotationsChanges, + } + + scheme := mgr.GetScheme() + _, err := scheme.New(options.GVK) + if runtime.IsNotRegisteredError(err) { + // Register the GVK with the schema + scheme.AddKnownTypeWithName(options.GVK, &unstructured.Unstructured{}) + metav1.AddToGroupVersion(mgr.GetScheme(), schema.GroupVersion{ + Group: options.GVK.Group, + Version: options.GVK.Version, + }) + } else if err != nil { + log.Error(err, "") + os.Exit(1) + } + + //Create new controller runtime controller and set the controller to watch GVK. + c, err := controller.New(fmt.Sprintf("%v-controller", strings.ToLower(options.GVK.Kind)), mgr, + controller.Options{ + Reconciler: aor, + MaxConcurrentReconciles: options.MaxConcurrentReconciles, + }) + if err != nil { + log.Error(err, "") + os.Exit(1) + } + + // Set up predicates. 
+ predicates := []ctrlpredicate.Predicate{ + ctrlpredicate.Or(ctrlpredicate.GenerationChangedPredicate{}, libpredicate.NoGenerationPredicate{}), + } + + if options.WatchAnnotationsChanges { + predicates = []ctrlpredicate.Predicate{ + ctrlpredicate.Or(ctrlpredicate.AnnotationChangedPredicate{}, predicates[0]), + } + } + + p, err := parsePredicateSelector(options.Selector) + + if err != nil { + log.Error(err, "") + os.Exit(1) + } + + if p != nil { + predicates = append(predicates, p) + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(options.GVK) + err = c.Watch(&source.Kind{Type: u}, &handler.LoggingEnqueueRequestForObject{}, predicates...) + if err != nil { + log.Error(err, "") + os.Exit(1) + } + + return &c +} + +// parsePredicateSelector parses the selector in the WatchOptions and creates a predicate +// that is used to filter resources based on the specified selector +func parsePredicateSelector(selector metav1.LabelSelector) (ctrlpredicate.Predicate, error) { + // If a selector has been specified in watches.yaml, add it to the watch's predicates. + if !reflect.ValueOf(selector).IsZero() { + p, err := ctrlpredicate.LabelSelectorPredicate(selector) + if err != nil { + return nil, fmt.Errorf("error constructing predicate from watches selector: %v", err) + } + return p, nil + } + return nil, nil +} diff --git a/internal/ansible/controller/controller_test.go b/internal/ansible/controller/controller_test.go new file mode 100644 index 0000000..968fdef --- /dev/null +++ b/internal/ansible/controller/controller_test.go @@ -0,0 +1,39 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestFilterPredicate(t *testing.T) { + matchLabelPass := make(map[string]string) + matchLabelPass["testKey"] = "testValue" + selectorPass := metav1.LabelSelector{ + MatchLabels: matchLabelPass, + } + noSelector := metav1.LabelSelector{} + + passPredicate, err := parsePredicateSelector(selectorPass) + assert.Equal(t, nil, err, "Verify that no error is thrown on a valid populated selector") + assert.NotEqual(t, nil, passPredicate, "Verify that a predicate is constructed using a valid selector") + + nilPredicate, err := parsePredicateSelector(noSelector) + assert.Equal(t, nil, err, "Verify that no error is thrown on a valid unpopulated selector") + assert.Equal(t, nil, nilPredicate, "Verify correct parsing of an unpopulated selector") +} diff --git a/internal/ansible/controller/reconcile.go b/internal/ansible/controller/reconcile.go new file mode 100644 index 0000000..0919f33 --- /dev/null +++ b/internal/ansible/controller/reconcile.go @@ -0,0 +1,457 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "os" + "strconv" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + ansiblestatus "github.com/operator-framework/ansible-operator-plugins/internal/ansible/controller/status" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/events" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/metrics" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/kubeconfig" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" +) + +const ( + // ReconcilePeriodAnnotation - annotation used by a user to specify the reconciliation interval for the CR. + // To use create a CR with an annotation "ansible.sdk.operatorframework.io/reconcile-period: 30s" or some other valid + // Duration. This will override the operators/or controllers reconcile period for that particular CR. 
+ ReconcilePeriodAnnotation = "ansible.sdk.operatorframework.io/reconcile-period" +) + +// AnsibleOperatorReconciler - object to reconcile runner requests +type AnsibleOperatorReconciler struct { + GVK schema.GroupVersionKind + Runner runner.Runner + Client client.Client + APIReader client.Reader + EventHandlers []events.EventHandler + ReconcilePeriod time.Duration + ManageStatus bool + AnsibleDebugLogs bool + WatchAnnotationsChanges bool +} + +// Reconcile - handle the event. +func (r *AnsibleOperatorReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo + // TODO: Try to reduce the complexity of this last measured at 42 (failing at > 30) and remove the // nolint:gocyclo + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(r.GVK) + err := r.Client.Get(ctx, request.NamespacedName, u) + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + if err != nil { + return reconcile.Result{}, err + } + + ident := strconv.Itoa(rand.Int()) + logger := logf.Log.WithName("reconciler").WithValues( + "job", ident, + "name", u.GetName(), + "namespace", u.GetNamespace(), + ) + + reconcileResult := reconcile.Result{RequeueAfter: r.ReconcilePeriod} + if ds, ok := u.GetAnnotations()[ReconcilePeriodAnnotation]; ok { + duration, err := time.ParseDuration(ds) + if err != nil { + // Should attempt to update to a failed condition + errmark := r.markError(ctx, request.NamespacedName, u, + fmt.Sprintf("Unable to parse reconcile period annotation: %v", err)) + if errmark != nil { + logger.Error(errmark, "Unable to mark error annotation") + } + logger.Error(err, "Unable to parse reconcile period annotation") + return reconcileResult, err + } + reconcileResult.RequeueAfter = duration + } + + deleted := u.GetDeletionTimestamp() != nil + finalizer, finalizerExists := r.Runner.GetFinalizer() + if !controllerutil.ContainsFinalizer(u, finalizer) { + if deleted { + // If the resource is being deleted we don't want to 
add the finalizer again + logger.Info("Resource is terminated, skipping reconciliation") + return reconcile.Result{}, nil + } else if finalizerExists { + logger.V(1).Info("Adding finalizer to resource", "Finalizer", finalizer) + controllerutil.AddFinalizer(u, finalizer) + err := r.Client.Update(ctx, u) + if err != nil { + logger.Error(err, "Unable to update cr with finalizer") + return reconcileResult, err + } + } + } + + spec := u.Object["spec"] + _, ok := spec.(map[string]interface{}) + // Need to handle cases where there is no spec. + // We can add the spec to the object, which will allow + // everything to work, and will not get updated. + // Therefore we can now deal with the case of secrets and configmaps. + if !ok { + logger.V(1).Info("Spec was not found") + u.Object["spec"] = map[string]interface{}{} + } + + if r.ManageStatus { + errmark := r.markRunning(ctx, request.NamespacedName, u) + if errmark != nil { + logger.Error(errmark, "Unable to update the status to mark cr as running") + return reconcileResult, errmark + } + } + + ownerRef := metav1.OwnerReference{ + APIVersion: u.GetAPIVersion(), + Kind: u.GetKind(), + Name: u.GetName(), + UID: u.GetUID(), + } + + kc, err := kubeconfig.Create(ownerRef, "http://localhost:8888", u.GetNamespace()) + if err != nil { + errmark := r.markError(ctx, request.NamespacedName, u, "Unable to run reconciliation") + if errmark != nil { + logger.Error(errmark, "Unable to mark error to run reconciliation") + } + logger.Error(err, "Unable to generate kubeconfig") + return reconcileResult, err + } + defer func() { + if err := os.Remove(kc.Name()); err != nil { + logger.Error(err, "Failed to remove generated kubeconfig file") + } + }() + result, err := r.Runner.Run(ident, u, kc.Name()) + if err != nil { + errmark := r.markError(ctx, request.NamespacedName, u, "Unable to run reconciliation") + if errmark != nil { + logger.Error(errmark, "Unable to mark error to run reconciliation") + } + logger.Error(err, "Unable to run ansible 
runner") + return reconcileResult, err + } + + // iterate events from ansible, looking for the final one + statusEvent := eventapi.StatusJobEvent{} + failureMessages := eventapi.FailureMessages{} + for event := range result.Events() { + for _, eHandler := range r.EventHandlers { + go eHandler.Handle(ident, u, event) + } + if event.Event == eventapi.EventPlaybookOnStats { + // convert to StatusJobEvent; would love a better way to do this + data, err := json.Marshal(event) + if err != nil { + printEventStats(statusEvent, u) + return reconcile.Result{}, err + } + err = json.Unmarshal(data, &statusEvent) + if err != nil { + printEventStats(statusEvent, u) + return reconcile.Result{}, err + } + } + if module, found := event.EventData["task_action"]; found { + if module == "operator_sdk.util.requeue_after" || module == "requeue_after" && event.Event != eventapi.EventRunnerOnFailed { + if data, exists := event.EventData["res"]; exists { + if fields, check := data.(map[string]interface{}); check { + requeueDuration, err := time.ParseDuration(fields["period"].(string)) + if err != nil { + logger.Error(err, "Unable to parse time input") + return reconcileResult, err + } + reconcileResult.RequeueAfter = requeueDuration + logger.Info(fmt.Sprintf("Set the reconciliation to occur after %s", requeueDuration)) + return reconcileResult, nil + } + } + } + } + if event.Event == eventapi.EventRunnerOnFailed && !event.IgnoreError() && !event.Rescued() { + failureMessages = append(failureMessages, event.GetFailedPlaybookMessage()) + } + } + + // To print the stats of the task + printEventStats(statusEvent, u) + + // To print the full ansible result + r.printAnsibleResult(result, u) + + if statusEvent.Event == "" { + eventErr := errors.New("did not receive playbook_on_stats event") + stdout, err := result.Stdout() + if err != nil { + errmark := r.markError(ctx, request.NamespacedName, u, "Failed to get ansible-runner stdout") + if errmark != nil { + logger.Error(errmark, "Unable to mark 
error to run reconciliation") + } + logger.Error(err, "Failed to get ansible-runner stdout") + return reconcileResult, err + } + logger.Error(eventErr, stdout) + return reconcileResult, eventErr + } + + // Need to get the unstructured object after the Ansible runner finishes. + // This needs to hit the API server to retrieve updates. + err = r.APIReader.Get(ctx, request.NamespacedName, u) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // We only want to update the CustomResource once, so we'll track changes + // and do it at the end + runSuccessful := len(failureMessages) == 0 + + recentlyDeleted := u.GetDeletionTimestamp() != nil + + // The finalizer has run successfully, time to remove it + if deleted && finalizerExists && runSuccessful { + controllerutil.RemoveFinalizer(u, finalizer) + err := r.Client.Update(ctx, u) + if err != nil { + logger.Error(err, "Failed to remove finalizer") + return reconcileResult, err + } + } else if recentlyDeleted && finalizerExists { + // If the CR was deleted after the reconcile began, we need to requeue for the finalizer. 
+ reconcileResult.Requeue = true + } + if r.ManageStatus { + errmark := r.markDone(ctx, request.NamespacedName, u, statusEvent, failureMessages) + if errmark != nil { + logger.Error(errmark, "Failed to mark status done") + } + // re-trigger reconcile because of failures + if !runSuccessful { + return reconcileResult, errors.New("event runner on failed") + } + return reconcileResult, errmark + } + + // re-trigger reconcile because of failures + if !runSuccessful { + return reconcileResult, errors.New("received failed task event") + } + return reconcileResult, nil +} + +func printEventStats(statusEvent eventapi.StatusJobEvent, u *unstructured.Unstructured) { + if len(statusEvent.StdOut) > 0 { + str := fmt.Sprintf("Ansible Task Status Event StdOut (%s, %s/%s)", u.GroupVersionKind(), u.GetName(), u.GetNamespace()) + fmt.Printf("\n----- %70s -----\n\n%s\n\n----------\n", str, statusEvent.StdOut) + } +} + +func (r *AnsibleOperatorReconciler) printAnsibleResult(result runner.RunResult, u *unstructured.Unstructured) { + if r.AnsibleDebugLogs { + if res, err := result.Stdout(); err == nil && len(res) > 0 { + str := fmt.Sprintf("Ansible Debug Result (%s, %s/%s)", u.GroupVersionKind(), u.GetName(), u.GetNamespace()) + fmt.Printf("\n----- %70s -----\n\n%s\n\n----------\n", str, res) + } + } +} + +func (r *AnsibleOperatorReconciler) markRunning(ctx context.Context, nn types.NamespacedName, u *unstructured.Unstructured) error { + + // Get the latest resource to prevent updating a stale status. + if err := r.APIReader.Get(ctx, nn, u); err != nil { + return err + } + crStatus := getStatus(u) + + // If there is no current status add that we are working on this resource. 
+ errCond := ansiblestatus.GetCondition(crStatus, ansiblestatus.FailureConditionType) + if errCond != nil { + errCond.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *errCond) + } + successCond := ansiblestatus.GetCondition(crStatus, ansiblestatus.SuccessfulConditionType) + if successCond != nil { + successCond.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *successCond) + } + // If the condition is currently running, making sure that the values are correct. + // If they are the same a no-op, if they are different then it is a good thing we + // are updating it. + c := ansiblestatus.NewCondition( + ansiblestatus.RunningConditionType, + v1.ConditionTrue, + nil, + ansiblestatus.RunningReason, + ansiblestatus.RunningMessage, + ) + ansiblestatus.SetCondition(&crStatus, *c) + u.Object["status"] = crStatus.GetJSONMap() + + return r.Client.Status().Update(ctx, u) +} + +// markError - used to alert the user to the issues during the validation of a reconcile run. +// i.e Annotations that could be incorrect +func (r *AnsibleOperatorReconciler) markError(ctx context.Context, nn types.NamespacedName, u *unstructured.Unstructured, + failureMessage string) error { + + logger := logf.Log.WithName("markError") + // Immediately update metrics with failed reconciliation, since Get() + // may fail. + metrics.ReconcileFailed(r.GVK.String()) + // Get the latest resource to prevent updating a stale status. 
+ if err := r.APIReader.Get(ctx, nn, u); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("Resource not found, assuming it was deleted") + return nil + } + return err + } + crStatus := getStatus(u) + + rc := ansiblestatus.GetCondition(crStatus, ansiblestatus.RunningConditionType) + if rc != nil { + rc.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *rc) + } + sc := ansiblestatus.GetCondition(crStatus, ansiblestatus.SuccessfulConditionType) + if sc != nil { + sc.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *sc) + } + + c := ansiblestatus.NewCondition( + ansiblestatus.FailureConditionType, + v1.ConditionTrue, + nil, + ansiblestatus.FailedReason, + failureMessage, + ) + ansiblestatus.SetCondition(&crStatus, *c) + // This needs the status subresource to be enabled by default. + u.Object["status"] = crStatus.GetJSONMap() + + return r.Client.Status().Update(ctx, u) +} + +func (r *AnsibleOperatorReconciler) markDone(ctx context.Context, nn types.NamespacedName, u *unstructured.Unstructured, + statusEvent eventapi.StatusJobEvent, failureMessages eventapi.FailureMessages) error { + + logger := logf.Log.WithName("markDone") + // Get the latest resource to prevent updating a stale status. 
+ if err := r.APIReader.Get(ctx, nn, u); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("Resource not found, assuming it was deleted") + return nil + } + return err + } + crStatus := getStatus(u) + + runSuccessful := len(failureMessages) == 0 + ansibleStatus := ansiblestatus.NewAnsibleResultFromStatusJobEvent(statusEvent) + + if runSuccessful { + metrics.ReconcileSucceeded(r.GVK.String()) + deprecatedRunningCondition := ansiblestatus.NewCondition( + ansiblestatus.RunningConditionType, + v1.ConditionTrue, + ansibleStatus, + ansiblestatus.SuccessfulReason, + ansiblestatus.AwaitingMessage, + ) + failureCondition := ansiblestatus.NewCondition( + ansiblestatus.FailureConditionType, + v1.ConditionFalse, + nil, + "", + "", + ) + successfulCondition := ansiblestatus.NewCondition( + ansiblestatus.SuccessfulConditionType, + v1.ConditionTrue, + nil, + ansiblestatus.SuccessfulReason, + ansiblestatus.SuccessfulMessage, + ) + ansiblestatus.SetCondition(&crStatus, *deprecatedRunningCondition) + ansiblestatus.SetCondition(&crStatus, *successfulCondition) + ansiblestatus.SetCondition(&crStatus, *failureCondition) + } else { + metrics.ReconcileFailed(r.GVK.String()) + sc := ansiblestatus.GetCondition(crStatus, ansiblestatus.RunningConditionType) + if sc != nil { + sc.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *sc) + } + failureCondition := ansiblestatus.NewCondition( + ansiblestatus.FailureConditionType, + v1.ConditionTrue, + ansibleStatus, + ansiblestatus.FailedReason, + strings.Join(failureMessages, "\n"), + ) + successfulCondition := ansiblestatus.NewCondition( + ansiblestatus.SuccessfulConditionType, + v1.ConditionFalse, + nil, + "", + "", + ) + ansiblestatus.SetCondition(&crStatus, *failureCondition) + ansiblestatus.SetCondition(&crStatus, *successfulCondition) + } + // This needs the status subresource to be enabled by default. 
+ u.Object["status"] = crStatus.GetJSONMap() + + return r.Client.Status().Update(ctx, u) +} + +// getStatus returns u's "status" block as a status.Status. +func getStatus(u *unstructured.Unstructured) ansiblestatus.Status { + statusInterface := u.Object["status"] + statusMap, ok := statusInterface.(map[string]interface{}) + // If the map is not available create one. + if !ok { + statusMap = map[string]interface{}{} + } + return ansiblestatus.CreateFromMap(statusMap) +} diff --git a/internal/ansible/controller/reconcile_test.go b/internal/ansible/controller/reconcile_test.go new file mode 100644 index 0000000..84c5863 --- /dev/null +++ b/internal/ansible/controller/reconcile_test.go @@ -0,0 +1,598 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller_test + +import ( + "context" + "reflect" + "testing" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/controller" + ansiblestatus "github.com/operator-framework/ansible-operator-plugins/internal/ansible/controller/status" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/events" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/fake" +) + +func TestReconcile(t *testing.T) { + gvk := schema.GroupVersionKind{ + Kind: "Testing", + Group: "operator-sdk", + Version: "v1beta1", + } + eventTime := time.Now() + testCases := []struct { + Name string + GVK schema.GroupVersionKind + ReconcilePeriod time.Duration + Runner runner.Runner + EventHandlers []events.EventHandler + Client client.Client + ExpectedObject *unstructured.Unstructured + Result reconcile.Result + Request reconcile.Request + ShouldError bool + ManageStatus bool + }{ + { + Name: "cr not found", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{}, + }, + Client: fakeclient.NewClientBuilder().Build(), + Result: reconcile.Result{}, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "not_found", + Namespace: "default", + }, + }, + }, + { + Name: "completed reconcile", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + ManageStatus: true, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + 
Created: eventapi.EventTime{Time: eventTime}, + }, + }, + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + }, + }).Build(), + Result: reconcile.Result{ + RequeueAfter: 5 * time.Second, + }, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ExpectedObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "conditions": []interface{}{ + map[string]interface{}{ + "status": "True", + "type": "Running", + "ansibleResult": map[string]interface{}{ + "changed": int64(0), + "failures": int64(0), + "ok": int64(0), + "skipped": int64(0), + "completion": eventTime.Format("2006-01-02T15:04:05.99999999"), + }, + "message": "Awaiting next reconciliation", + "reason": "Successful", + }, + map[string]interface{}{ + "status": "True", + "type": "Successful", + "message": "Last reconciliation succeeded", + "reason": "Successful", + }, + map[string]interface{}{ + "status": "False", + "type": "Failure", + }, + }, + }, + }, + }, + }, + { + Name: "Failure event runner on failed with manageStatus == true", + GVK: gvk, + ManageStatus: true, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventRunnerOnFailed, + Created: eventapi.EventTime{Time: eventTime}, + EventData: map[string]interface{}{ + "res": map[string]interface{}{ + "msg": "new failure message", + }, + }, + }, + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + }, + Client: 
fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + }, + }).Build(), + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ExpectedObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "conditions": []interface{}{ + map[string]interface{}{ + "status": "False", + "type": "Running", + "message": "Running reconciliation", + "reason": "Running", + }, + map[string]interface{}{ + "status": "True", + "type": "Failure", + "ansibleResult": map[string]interface{}{ + "changed": int64(0), + "failures": int64(0), + "ok": int64(0), + "skipped": int64(0), + "completion": eventTime.Format("2006-01-02T15:04:05.99999999"), + }, + "message": "new failure message", + "reason": "Failed", + }, + map[string]interface{}{ + "status": "False", + "type": "Successful", + }, + }, + }, + }, + }, + ShouldError: true, + }, + { + Name: "Failure event runner on failed", + GVK: gvk, + ManageStatus: false, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventRunnerOnFailed, + Created: eventapi.EventTime{Time: eventTime}, + EventData: map[string]interface{}{ + "res": map[string]interface{}{ + "msg": "new failure message", + }, + }, + }, + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": 
map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + }, + }).Build(), + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ShouldError: true, + }, + { + Name: "Finalizer successful reconcile", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + ManageStatus: true, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + Finalizer: "testing.io/finalizer", + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + "annotations": map[string]interface{}{ + controller.ReconcilePeriodAnnotation: "3s", + }, + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + }, + }).Build(), + Result: reconcile.Result{ + RequeueAfter: 3 * time.Second, + }, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ExpectedObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + "annotations": map[string]interface{}{ + controller.ReconcilePeriodAnnotation: "3s", + }, + "finalizers": []interface{}{ + "testing.io/finalizer", + }, + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "conditions": []interface{}{ + map[string]interface{}{ + "status": "True", + "type": "Running", + "ansibleResult": map[string]interface{}{ + "changed": int64(0), + "failures": int64(0), + "ok": int64(0), + "skipped": int64(0), + "completion": 
eventTime.Format("2006-01-02T15:04:05.99999999"), + }, + "message": "Awaiting next reconciliation", + "reason": "Successful", + }, + map[string]interface{}{ + "status": "True", + "type": "Successful", + "message": "Last reconciliation succeeded", + "reason": "Successful", + }, + map[string]interface{}{ + "status": "False", + "type": "Failure", + }, + }, + }, + }, + }, + }, + { + Name: "reconcile deletetion", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + Finalizer: "testing.io/finalizer", + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + "annotations": map[string]interface{}{ + controller.ReconcilePeriodAnnotation: "3s", + }, + "deletionTimestamp": eventTime.Format(time.RFC3339), + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + }, + }).Build(), + Result: reconcile.Result{}, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + }, + { + Name: "Finalizer successful deletion reconcile", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + ManageStatus: true, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + Finalizer: "testing.io/finalizer", + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + "finalizers": []interface{}{ + "testing.io/finalizer", + }, + "deletionTimestamp": eventTime.Format(time.RFC3339), + }, + "apiVersion": 
"operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "conditions": []interface{}{ + map[string]interface{}{ + "status": "True", + "type": "Running", + "ansibleResult": map[string]interface{}{ + "changed": int64(0), + "failures": int64(0), + "ok": int64(0), + "skipped": int64(0), + "completion": eventTime.Format("2006-01-02T15:04:05.99999999"), + }, + "message": "Awaiting next reconciliation", + "reason": "Successful", + }, + }, + }, + }, + }).Build(), + Result: reconcile.Result{ + RequeueAfter: 5 * time.Second, + }, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + }, + { + Name: "No status event", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + Runner: &fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "conditions": []interface{}{ + map[string]interface{}{ + "status": "True", + "type": "Running", + "ansibleResult": map[string]interface{}{ + "changed": int64(0), + "failures": int64(0), + "ok": int64(0), + "skipped": int64(0), + "completion": eventTime.Format("2006-01-02T15:04:05.99999999"), + }, + "message": "Failed to get ansible-runner stdout", + }, + }, + }, + }, + }).Build(), + Result: reconcile.Result{ + RequeueAfter: 5 * time.Second, + }, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ShouldError: true, + }, + { + Name: "no manage status", + GVK: gvk, + ReconcilePeriod: 5 * time.Second, + ManageStatus: false, + Runner: 
&fake.Runner{ + JobEvents: []eventapi.JobEvent{ + eventapi.JobEvent{ + Event: eventapi.EventPlaybookOnStats, + Created: eventapi.EventTime{Time: eventTime}, + }, + }, + }, + Client: fakeclient.NewClientBuilder().WithObjects(&unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + }, + }).Build(), + Result: reconcile.Result{ + RequeueAfter: 5 * time.Second, + }, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "reconcile", + Namespace: "default", + }, + }, + ExpectedObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "reconcile", + "namespace": "default", + }, + "apiVersion": "operator-sdk/v1beta1", + "kind": "Testing", + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + var aor reconcile.Reconciler = &controller.AnsibleOperatorReconciler{ + GVK: tc.GVK, + Runner: tc.Runner, + Client: tc.Client, + APIReader: tc.Client, + EventHandlers: tc.EventHandlers, + ReconcilePeriod: tc.ReconcilePeriod, + ManageStatus: tc.ManageStatus, + } + result, err := aor.Reconcile(context.TODO(), tc.Request) + if err != nil && !tc.ShouldError { + t.Fatalf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(result, tc.Result) { + t.Fatalf("Reconcile result does not equal\nexpected: %#v\nactual: %#v", tc.Result, result) + } + if tc.ExpectedObject != nil { + actualObject := &unstructured.Unstructured{} + actualObject.SetGroupVersionKind(tc.ExpectedObject.GroupVersionKind()) + err := tc.Client.Get(context.TODO(), types.NamespacedName{ + Name: tc.ExpectedObject.GetName(), + Namespace: tc.ExpectedObject.GetNamespace(), + }, actualObject) + if err != nil { + t.Fatalf("Failed to get object: (%v)", err) + } + if 
!reflect.DeepEqual(actualObject.GetAnnotations(), tc.ExpectedObject.GetAnnotations()) { + t.Fatalf("Annotations are not the same\nexpected: %v\nactual: %v", + tc.ExpectedObject.GetAnnotations(), actualObject.GetAnnotations()) + } + if !reflect.DeepEqual(actualObject.GetFinalizers(), tc.ExpectedObject.GetFinalizers()) && + len(actualObject.GetFinalizers()) != 0 && len(tc.ExpectedObject.GetFinalizers()) != 0 { + t.Fatalf("Finalizers are not the same\nexpected: %#v\nactual: %#v", + tc.ExpectedObject.GetFinalizers(), actualObject.GetFinalizers()) + } + sMap, _ := tc.ExpectedObject.Object["status"].(map[string]interface{}) + expectedStatus := ansiblestatus.CreateFromMap(sMap) + sMap, _ = actualObject.Object["status"].(map[string]interface{}) + actualStatus := ansiblestatus.CreateFromMap(sMap) + if len(expectedStatus.Conditions) != len(actualStatus.Conditions) { + t.Fatalf("Status conditions not the same\nexpected: %v\nactual: %v", expectedStatus, + actualStatus) + } + for _, c := range expectedStatus.Conditions { + actualCond := ansiblestatus.GetCondition(actualStatus, c.Type) + if c.Reason != actualCond.Reason || c.Message != actualCond.Message || c.Status != + actualCond.Status { + t.Fatalf("Message or reason did not match\nexpected: %+v\nactual: %+v", c, actualCond) + } + if c.AnsibleResult == nil && actualCond.AnsibleResult != nil { + t.Fatalf("Ansible result did not match\nexpected: %+v\nactual: %+v", c.AnsibleResult, + actualCond.AnsibleResult) + } + if c.AnsibleResult != nil { + if !reflect.DeepEqual(c.AnsibleResult, actualCond.AnsibleResult) { + t.Fatalf("Ansible result did not match\nexpected: %+v\nactual: %+v", c.AnsibleResult, + actualCond.AnsibleResult) + } + } + } + } + }) + } +} diff --git a/internal/ansible/controller/status/types.go b/internal/ansible/controller/status/types.go new file mode 100644 index 0000000..3899252 --- /dev/null +++ b/internal/ansible/controller/status/types.go @@ -0,0 +1,200 @@ +// Copyright 2018 The Operator-SDK Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package status + +import ( + "encoding/json" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" +) + +var log = logf.Log.WithName("controller.status") + +const ( + host = "localhost" +) + +// AnsibleResult - encapsulation of the ansible result. +type AnsibleResult struct { + Ok int `json:"ok"` + Changed int `json:"changed"` + Skipped int `json:"skipped"` + Failures int `json:"failures"` + TimeOfCompletion eventapi.EventTime `json:"completion"` +} + +// NewAnsibleResultFromStatusJobEvent - creates a Ansible status from job event. +func NewAnsibleResultFromStatusJobEvent(je eventapi.StatusJobEvent) *AnsibleResult { + // ok events. + a := &AnsibleResult{TimeOfCompletion: je.Created} + if v, ok := je.EventData.Changed[host]; ok { + a.Changed = v + } + if v, ok := je.EventData.Ok[host]; ok { + a.Ok = v + } + if v, ok := je.EventData.Skipped[host]; ok { + a.Skipped = v + } + if v, ok := je.EventData.Failures[host]; ok { + a.Failures = v + } + return a +} + +// NewAnsibleResultFromMap - creates a Ansible status from a job event. +func NewAnsibleResultFromMap(sm map[string]interface{}) *AnsibleResult { + //Create Old top level status + // ok events. 
+ a := &AnsibleResult{} + if v, ok := sm["changed"]; ok { + a.Changed = int(v.(int64)) + } + if v, ok := sm["ok"]; ok { + a.Ok = int(v.(int64)) + } + if v, ok := sm["skipped"]; ok { + a.Skipped = int(v.(int64)) + } + if v, ok := sm["failures"]; ok { + a.Failures = int(v.(int64)) + } + if v, ok := sm["completion"]; ok { + s := v.(string) + if err := a.TimeOfCompletion.UnmarshalJSON([]byte(s)); err != nil { + log.Error(err, "Failed to unmarshal time of completion for ansible result") + } + } + return a +} + +// ConditionType - type of condition +type ConditionType string + +const ( + // RunningConditionType - condition type of running. + RunningConditionType ConditionType = "Running" + // FailureConditionType - condition type of failure. + FailureConditionType ConditionType = "Failure" + // SuccessfulConditionType - condition type of success. + SuccessfulConditionType ConditionType = "Successful" +) + +// Condition - the condition for the ansible operator. +type Condition struct { + Type ConditionType `json:"type"` + Status v1.ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + AnsibleResult *AnsibleResult `json:"ansibleResult,omitempty"` + Reason string `json:"reason"` + Message string `json:"message"` +} + +func createConditionFromMap(cm map[string]interface{}) Condition { + ct, ok := cm["type"].(string) + if !ok { + //If we do not find the string we are defaulting + // to make sure we can at least update the status. 
+ ct = string(RunningConditionType) + } + status, ok := cm["status"].(string) + if !ok { + status = string(v1.ConditionTrue) + } + reason, ok := cm["reason"].(string) + if !ok { + reason = "" + } + message, ok := cm["message"].(string) + if !ok { + message = "" + } + asm, ok := cm["ansibleResult"].(map[string]interface{}) + var ansibleResult *AnsibleResult + if ok { + ansibleResult = NewAnsibleResultFromMap(asm) + } + ltts, ok := cm["lastTransitionTime"].(string) + ltt := metav1.Now() + if ok { + t, err := time.Parse("2006-01-02T15:04:05Z", ltts) + if err != nil { + log.Info("Unable to parse time for status condition", "Time", ltts) + } else { + ltt = metav1.NewTime(t) + } + } + return Condition{ + Type: ConditionType(ct), + Status: v1.ConditionStatus(status), + LastTransitionTime: ltt, + Reason: reason, + Message: message, + AnsibleResult: ansibleResult, + } +} + +// Status - The status for custom resources managed by the operator-sdk. +type Status struct { + Conditions []Condition `json:"conditions"` + CustomStatus map[string]interface{} `json:"-"` +} + +// CreateFromMap - create a status from the map +func CreateFromMap(statusMap map[string]interface{}) Status { + customStatus := make(map[string]interface{}) + for key, value := range statusMap { + if key != "conditions" { + customStatus[key] = value + } + } + conditionsInterface, ok := statusMap["conditions"].([]interface{}) + if !ok { + return Status{Conditions: []Condition{}, CustomStatus: customStatus} + } + conditions := []Condition{} + for _, ci := range conditionsInterface { + cm, ok := ci.(map[string]interface{}) + if !ok { + log.Info("Unknown condition, removing condition", "ConditionInterface", ci) + continue + } + conditions = append(conditions, createConditionFromMap(cm)) + } + return Status{Conditions: conditions, CustomStatus: customStatus} +} + +// GetJSONMap - gets the map value for the status object. +// This is used to set the status on the CR. 
+// This is needed because the unstructured type has special rules around DeepCopy. +// If you do not convert the status to the map, then DeepCopy for the +// unstructured will fail and throw runtime exceptions. +// Please note that this will return an empty map on error. +func (status *Status) GetJSONMap() map[string]interface{} { + b, err := json.Marshal(status) + if err != nil { + log.Error(err, "Unable to marshal json") + return status.CustomStatus + } + if err := json.Unmarshal(b, &status.CustomStatus); err != nil { + log.Error(err, "Unable to unmarshal json") + } + return status.CustomStatus +} diff --git a/internal/ansible/controller/status/utils.go b/internal/ansible/controller/status/utils.go new file mode 100644 index 0000000..cbf2608 --- /dev/null +++ b/internal/ansible/controller/status/utils.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package status + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // RunningReason - Condition is running + RunningReason = "Running" + // SuccessfulReason - Condition is running due to reconcile being successful + SuccessfulReason = "Successful" + // FailedReason - Condition is failed due to ansible failure + FailedReason = "Failed" + // UnknownFailedReason - Condition is unknown + UnknownFailedReason = "Unknown" +) + +const ( + // RunningMessage - message for running reason. 
+ RunningMessage = "Running reconciliation" + // AwaitingMessage - message shown while awaiting the next reconciliation. + AwaitingMessage = "Awaiting next reconciliation" + // SuccessfulMessage - message for successful condition. + SuccessfulMessage = "Last reconciliation succeeded" +) + +// NewCondition - creates a new condition with the current time as its LastTransitionTime. +func NewCondition(condType ConditionType, status v1.ConditionStatus, ansibleResult *AnsibleResult, reason, + message string) *Condition { + return &Condition{ + Type: condType, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + AnsibleResult: ansibleResult, + } +} + +// GetCondition returns the condition with the provided type. +func GetCondition(status Status, condType ConditionType) *Condition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// SetCondition updates the status to include the provided condition. If the condition that +// we are about to add already exists and has the same status and reason then we are not going to update. +func SetCondition(status *Status, condition Condition) { + currentCond := GetCondition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { + return + } + // Do not update lastTransitionTime if the status of the condition doesn't change. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) +} + +// RemoveCondition removes the status condition with the provided type. 
+func RemoveCondition(status *Status, condType ConditionType) { + status.Conditions = filterOutCondition(status.Conditions, condType) +} + +// filterOutCondition returns a new slice of status conditions without conditions with the provided type. +func filterOutCondition(conditions []Condition, condType ConditionType) []Condition { + var newConditions []Condition + for _, c := range conditions { + if c.Type == condType { + continue + } + newConditions = append(newConditions, c) + } + return newConditions +} diff --git a/internal/ansible/controller/status/utils_test.go b/internal/ansible/controller/status/utils_test.go new file mode 100644 index 0000000..1671dc1 --- /dev/null +++ b/internal/ansible/controller/status/utils_test.go @@ -0,0 +1,302 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package status + +import ( + "reflect" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNewCondition(t *testing.T) { + testCases := []struct { + name string + condType ConditionType + status v1.ConditionStatus + ansibleResult *AnsibleResult + reason string + message string + expectedCondtion Condition + }{ + { + name: "running condition creating", + condType: RunningConditionType, + status: v1.ConditionTrue, + ansibleResult: nil, + reason: RunningReason, + message: RunningMessage, + expectedCondtion: Condition{ + Type: RunningConditionType, + Status: v1.ConditionTrue, + Reason: RunningReason, + Message: RunningMessage, + }, + }, + { + name: "failure condition creating", + condType: FailureConditionType, + status: v1.ConditionFalse, + ansibleResult: &AnsibleResult{ + Changed: 0, + Failures: 1, + Ok: 10, + Skipped: 1, + }, + reason: FailedReason, + message: "invalid parameter", + expectedCondtion: Condition{ + Type: FailureConditionType, + Status: v1.ConditionFalse, + Reason: FailedReason, + Message: "invalid parameter", + AnsibleResult: &AnsibleResult{ + Changed: 0, + Failures: 1, + Ok: 10, + Skipped: 1, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ac := NewCondition(tc.condType, tc.status, tc.ansibleResult, tc.reason, tc.message) + tc.expectedCondtion.LastTransitionTime = ac.LastTransitionTime + if !reflect.DeepEqual(*ac, tc.expectedCondtion) { + t.Fatalf("Condition did no match expected:\nActual: %#v\nExpected: %#v", *ac, tc.expectedCondtion) + } + }) + } +} + +func TestGetCondition(t *testing.T) { + testCases := []struct { + name string + condType ConditionType + status Status + expectedCondition *Condition + }{ + { + name: "find RunningCondition", + condType: RunningConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + }, + }, + }, + expectedCondition: &Condition{ + Type: RunningConditionType, + }, + }, + { 
+ name: "did not find RunningCondition", + condType: RunningConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: FailureConditionType, + }, + }, + }, + expectedCondition: nil, + }, + { + name: "find FailureCondition", + condType: FailureConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: FailureConditionType, + }, + }, + }, + expectedCondition: &Condition{ + Type: FailureConditionType, + }, + }, + { + name: "did not find FailureCondition", + condType: FailureConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + }, + }, + }, + expectedCondition: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ac := GetCondition(tc.status, tc.condType) + if !reflect.DeepEqual(ac, tc.expectedCondition) { + t.Fatalf("Condition did no match expected:\nActual: %#v\nExpected: %#v", ac, tc.expectedCondition) + } + }) + } +} + +func TestRemoveCondition(t *testing.T) { + testCases := []struct { + name string + condType ConditionType + status Status + expectedSize int + }{ + { + name: "remove RunningCondition", + condType: RunningConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + }, + }, + }, + expectedSize: 0, + }, + { + name: "did not find RunningCondition", + condType: RunningConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: FailureConditionType, + }, + }, + }, + expectedSize: 1, + }, + { + name: "remove FailureCondition", + condType: FailureConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: FailureConditionType, + }, + }, + }, + expectedSize: 0, + }, + { + name: "did not find FailureCondition", + condType: FailureConditionType, + status: Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + }, + }, + }, + expectedSize: 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + 
RemoveCondition(&tc.status, tc.condType) + if tc.expectedSize != len(tc.status.Conditions) { + t.Fatalf("Conditions did no match expected size:\nActual: %#v\nExpected: %#v", + len(tc.status.Conditions), tc.expectedSize) + } + }) + } +} + +func TestSetCondition(t *testing.T) { + lastTransitionTime := metav1.Now() + keeptMessage := SuccessfulMessage + testCases := []struct { + name string + status *Status + condition *Condition + expectedNewSize int + keepLastTransitionTime bool + keepMessage bool + }{ + { + name: "add new condition", + status: &Status{ + Conditions: []Condition{}, + }, + condition: NewCondition(RunningConditionType, v1.ConditionTrue, nil, RunningReason, RunningMessage), + expectedNewSize: 1, + keepLastTransitionTime: false, + }, + { + name: "update running condition", + status: &Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + Status: v1.ConditionTrue, + Reason: SuccessfulReason, + Message: SuccessfulMessage, + LastTransitionTime: lastTransitionTime, + }, + }, + }, + condition: NewCondition(RunningConditionType, v1.ConditionTrue, nil, RunningReason, RunningMessage), + expectedNewSize: 1, + keepLastTransitionTime: true, + }, + { + name: "do not update running condition", + status: &Status{ + Conditions: []Condition{ + Condition{ + Type: RunningConditionType, + Status: v1.ConditionTrue, + Reason: RunningReason, + Message: SuccessfulMessage, + LastTransitionTime: lastTransitionTime, + }, + }, + }, + condition: NewCondition(RunningConditionType, v1.ConditionTrue, nil, RunningReason, RunningMessage), + expectedNewSize: 1, + keepLastTransitionTime: true, + keepMessage: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + SetCondition(tc.status, *tc.condition) + if tc.expectedNewSize != len(tc.status.Conditions) { + t.Fatalf("New size of conditions did not match expected\nActual: %v\nExpected: %v", + len(tc.status.Conditions), tc.expectedNewSize) + } + if tc.keepLastTransitionTime { + 
tc.condition.LastTransitionTime = lastTransitionTime + } + if tc.keepMessage { + tc.condition.Message = keeptMessage + } + ac := GetCondition(*tc.status, tc.condition.Type) + if !reflect.DeepEqual(ac, tc.condition) { + t.Fatalf("Condition did not match expected:\nActual: %#v\nExpected: %#v", ac, tc.condition) + } + }) + } +} diff --git a/internal/ansible/events/log_events.go b/internal/ansible/events/log_events.go new file mode 100644 index 0000000..96a9177 --- /dev/null +++ b/internal/ansible/events/log_events.go @@ -0,0 +1,189 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package events + +import ( + "errors" + "fmt" + "os" + "strconv" + "sync" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" +) + +// LogLevel - Levelt for the logging to take place. +type LogLevel int + +const ( + // Tasks - only log the high level tasks. + Tasks LogLevel = iota + + // Everything - log every event. + Everything + + // Nothing - this will log nothing. + Nothing +) + +// EventHandler - knows how to handle job events. 
+type EventHandler interface { + Handle(string, *unstructured.Unstructured, eventapi.JobEvent) +} + +type loggingEventHandler struct { + LogLevel LogLevel + mux *sync.Mutex +} + +func (l loggingEventHandler) Handle(ident string, u *unstructured.Unstructured, e eventapi.JobEvent) { + if l.LogLevel == Nothing { + return + } + + logger := logf.Log.WithName("logging_event_handler").WithValues( + "name", u.GetName(), + "namespace", u.GetNamespace(), + "gvk", u.GroupVersionKind().String(), + "event_type", e.Event, + "job", ident, + ) + + verbosity := GetVerbosity(u, e, ident) + + // logger only the following for the 'Tasks' LogLevel + if l.LogLevel == Tasks { + t, ok := e.EventData["task"] + if ok { + setFactAction := e.EventData["task_action"] == eventapi.TaskActionSetFact + debugAction := e.EventData["task_action"] == eventapi.TaskActionDebug + + if verbosity > 0 { + l.mux.Lock() + fmt.Println(e.StdOut) + l.mux.Unlock() + return + } + if e.Event == eventapi.EventPlaybookOnTaskStart && !setFactAction && !debugAction { + l.mux.Lock() + logger.Info("[playbook task start]", "EventData.Name", e.EventData["name"]) + l.logAnsibleStdOut(e) + l.mux.Unlock() + return + } + if e.Event == eventapi.EventRunnerOnOk && debugAction { + l.mux.Lock() + logger.Info("[playbook debug]", "EventData.TaskArgs", e.EventData["task_args"]) + l.logAnsibleStdOut(e) + l.mux.Unlock() + return + } + if e.Event == eventapi.EventRunnerItemOnOk { + l.mux.Lock() + l.logAnsibleStdOut(e) + l.mux.Unlock() + return + } + if e.Event == eventapi.EventRunnerOnFailed { + errKVs := []interface{}{ + "EventData.Task", t, + "EventData.TaskArgs", e.EventData["task_args"], + } + if taskPath, ok := e.EventData["task_path"]; ok { + errKVs = append(errKVs, "EventData.FailedTaskPath", taskPath) + } + l.mux.Lock() + logger.Error(errors.New("[playbook task failed]"), "", errKVs...) 
+ l.logAnsibleStdOut(e) + l.mux.Unlock() + return + } + } + } + + // log everything else for the 'Everything' LogLevel + if l.LogLevel == Everything { + l.mux.Lock() + logger.Info("", "EventData", e.EventData) + l.logAnsibleStdOut(e) + l.mux.Unlock() + } +} + +// logAnsibleStdOut will print in the logs the Ansible Task Output formatted +func (l loggingEventHandler) logAnsibleStdOut(e eventapi.JobEvent) { + if len(e.StdOut) > 0 { + fmt.Printf("\n--------------------------- Ansible Task StdOut -------------------------------\n") + if e.Event != eventapi.EventPlaybookOnTaskStart { + fmt.Printf("\n TASK [%v] ******************************** \n", e.EventData["task"]) + } + fmt.Println(e.StdOut) + fmt.Printf("\n-------------------------------------------------------------------------------\n") + } +} + +// NewLoggingEventHandler - Creates a Logging Event Handler to log events. +func NewLoggingEventHandler(l LogLevel) EventHandler { + return loggingEventHandler{ + LogLevel: l, + mux: &sync.Mutex{}, + } +} + +// GetVerbosity - Parses the verbsoity from CR and environment variables +func GetVerbosity(u *unstructured.Unstructured, e eventapi.JobEvent, ident string) int { + logger := logf.Log.WithName("logging_event_handler").WithValues( + "name", u.GetName(), + "namespace", u.GetNamespace(), + "gvk", u.GroupVersionKind().String(), + "event_type", e.Event, + "job", ident, + ) + + // Parse verbosity from CR + verbosityAnnotation := 0 + if annot, exists := u.UnstructuredContent()["metadata"].(map[string]interface{})["annotations"]; exists { + if verbosityField, present := annot.(map[string]interface{})["ansible.sdk.operatorframework.io/verbosity"]; present { + var err error + verbosityAnnotation, err = strconv.Atoi(verbosityField.(string)) + if err != nil { + logger.Error(err, "Unable to parse verbosity value from CR.") + } + } + } + + // Parse verbosity from environment variable + verbosityEnvVar := 0 + everb := os.Getenv("ANSIBLE_VERBOSITY") + if everb != "" { + var err error 
+ verbosityEnvVar, err = strconv.Atoi(everb) + if err != nil { + logger.Error(err, "Unable to parse verbosity value from environment variable.") + } + } + + // Return in order of precedence + if verbosityAnnotation > 0 { + return verbosityAnnotation + } else if verbosityEnvVar > 0 { + return verbosityEnvVar + } else { + return 0 // Default + } +} diff --git a/internal/ansible/flags/flag.go b/internal/ansible/flags/flag.go new file mode 100644 index 0000000..c78ebb2 --- /dev/null +++ b/internal/ansible/flags/flag.go @@ -0,0 +1,243 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "runtime" + "time" + + "github.com/spf13/pflag" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// Flags - Options to be used by an ansible operator +type Flags struct { + ReconcilePeriod time.Duration + WatchesFile string + InjectOwnerRef bool + LeaderElection bool + MaxConcurrentReconciles int + AnsibleVerbosity int + AnsibleRolesPath string + AnsibleCollectionsPath string + MetricsBindAddress string + ProbeAddr string + LeaderElectionResourceLock string + LeaderElectionID string + LeaderElectionNamespace string + LeaseDuration time.Duration + RenewDeadline time.Duration + GracefulShutdownTimeout time.Duration + AnsibleArgs string + AnsibleLogEvents string + ProxyPort int + + // Path to a controller-runtime componentconfig file. + // If this is empty, use default values. 
+ ManagerConfigPath string + + // If not nil, used to deduce which flags were set in the CLI. + flagSet *pflag.FlagSet +} + +const ( + AnsibleRolesPathEnvVar = "ANSIBLE_ROLES_PATH" + AnsibleCollectionsPathEnvVar = "ANSIBLE_COLLECTIONS_PATH" +) + +// AddTo - Add the ansible operator flags to the the flagset +func (f *Flags) AddTo(flagSet *pflag.FlagSet) { + // Store flagset internally to be used for lookups later. + f.flagSet = flagSet + + // Ansible flags. + flagSet.StringVar(&f.WatchesFile, + "watches-file", + "./watches.yaml", + "Path to the watches file to use", + ) + flagSet.BoolVar(&f.InjectOwnerRef, + "inject-owner-ref", + true, + "The ansible operator will inject owner references unless this flag is false", + ) + flagSet.IntVar(&f.AnsibleVerbosity, + "ansible-verbosity", + 2, + "Ansible verbosity. Overridden by environment variable.", + ) + flagSet.StringVar(&f.AnsibleRolesPath, + "ansible-roles-path", + "", + "Ansible Roles Path. If unset, roles are assumed to be in {{CWD}}/roles.", + ) + flagSet.StringVar(&f.AnsibleCollectionsPath, + "ansible-collections-path", + "", + "Path to installed Ansible Collections. If set, collections should be located in {{value}}/ansible_collections/. "+ + "If unset, collections are assumed to be in ~/.ansible/collections or /usr/share/ansible/collections.", + ) + flagSet.StringVar(&f.AnsibleArgs, + "ansible-args", + "", + "Ansible args. Allows user to specify arbitrary arguments for ansible-based operators.", + ) + + // Controller flags. + flagSet.DurationVar(&f.ReconcilePeriod, + "reconcile-period", + 10*time.Hour, + "Default reconcile period for controllers", + ) + flagSet.IntVar(&f.MaxConcurrentReconciles, + "max-concurrent-reconciles", + runtime.NumCPU(), + "Maximum number of concurrent reconciles for controllers. Overridden by environment variable.", + ) + + // Controller manager flags. + flagSet.StringVar(&f.ManagerConfigPath, + "config", + "", + "The controller will load its initial configuration from this file. 
"+ + "Omit this flag to use the default configuration values. "+ + "Command-line flags override configuration from this file.", + ) + // TODO(2.0.0): remove + flagSet.StringVar(&f.MetricsBindAddress, + "metrics-addr", + ":8080", + "The address the metric endpoint binds to", + ) + _ = flagSet.MarkDeprecated("metrics-addr", "use --metrics-bind-address instead") + flagSet.StringVar(&f.MetricsBindAddress, + "metrics-bind-address", + ":8080", + "The address the metric endpoint binds to", + ) + // TODO(2.0.0): for Go/Helm the port used is: 8081 + // update it to keep the project aligned to the other + flagSet.StringVar(&f.ProbeAddr, + "health-probe-bind-address", + ":6789", + "The address the probe endpoint binds to.", + ) + // TODO(2.0.0): remove + flagSet.BoolVar(&f.LeaderElection, + "enable-leader-election", + false, + "Enable leader election for controller manager. Enabling this will"+ + " ensure there is only one active controller manager.", + ) + _ = flagSet.MarkDeprecated("enable-leader-election", "use --leader-elect instead") + flagSet.BoolVar(&f.LeaderElection, + "leader-elect", + false, + "Enable leader election for controller manager. Enabling this will"+ + " ensure there is only one active controller manager.", + ) + flagSet.StringVar(&f.LeaderElectionID, + "leader-election-id", + "", + "Name of the configmap that is used for holding the leader lock.", + ) + flagSet.StringVar(&f.LeaderElectionNamespace, + "leader-election-namespace", + "", + "Namespace in which to create the leader election configmap for"+ + " holding the leader lock (required if running locally with leader"+ + " election enabled).", + ) + flagSet.StringVar(&f.LeaderElectionResourceLock, + "leader-elect-resource-lock", + "configmapsleases", + "The type of resource object that is used for locking during leader election."+ + " Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. 
Default is configmapsleases.", + ) + flagSet.DurationVar(&f.LeaseDuration, + "leader-elect-lease-duration", + 15*time.Second, + "LeaseDuration is the duration that non-leader candidates will wait"+ + " to force acquire leadership. This is measured against time of last observed ack. Default is 15 seconds.", + ) + flagSet.DurationVar(&f.RenewDeadline, + "leader-elect-renew-deadline", + 10*time.Second, + "RenewDeadline is the duration that the acting controlplane will retry"+ + " refreshing leadership before giving up. Default is 10 seconds.", + ) + flagSet.DurationVar(&f.GracefulShutdownTimeout, + "graceful-shutdown-timeout", + 30*time.Second, + "The amount of time that will be spent waiting"+ + " for runners to gracefully exit.", + ) + flagSet.StringVar(&f.AnsibleLogEvents, + "ansible-log-events", + "tasks", + "Ansible log events. The log level for console logging."+ + " This flag can be set to either Nothing, Tasks, or Everything.", + ) + flagSet.IntVar(&f.ProxyPort, + "proxy-port", + 8888, + "Ansible proxy server port. Defaults to 8888.", + ) +} + +// ToManagerOptions uses the flag set in f to configure options. +// Values of options take precedence over flag defaults, +// as values are assume to have been explicitly set. +func (f *Flags) ToManagerOptions(options manager.Options) manager.Options { + // Alias FlagSet.Changed so options are still updated when fields are empty. 
+ changed := func(flagName string) bool { + return f.flagSet.Changed(flagName) + } + if f.flagSet == nil { + changed = func(flagName string) bool { return false } + } + + // TODO(2.0.0): remove metrics-addr + if changed("metrics-bind-address") || changed("metrics-addr") || options.MetricsBindAddress == "" { + options.MetricsBindAddress = f.MetricsBindAddress + } + if changed("health-probe-bind-address") || options.HealthProbeBindAddress == "" { + options.HealthProbeBindAddress = f.ProbeAddr + } + // TODO(2.0.0): remove enable-leader-election + if changed("leader-elect") || changed("enable-leader-election") || !options.LeaderElection { + options.LeaderElection = f.LeaderElection + } + if changed("leader-election-id") || options.LeaderElectionID == "" { + options.LeaderElectionID = f.LeaderElectionID + } + if changed("leader-election-namespace") || options.LeaderElectionNamespace == "" { + options.LeaderElectionNamespace = f.LeaderElectionNamespace + } + if changed("leader-elect-lease-duration") || options.LeaseDuration == nil { + options.LeaseDuration = &f.LeaseDuration + } + if changed("leader-elect-renew-deadline") || options.RenewDeadline == nil { + options.RenewDeadline = &f.RenewDeadline + } + if changed("leader-elect-resource-lock") || options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = f.LeaderElectionResourceLock + } + if changed("graceful-shutdown-timeout") || options.GracefulShutdownTimeout == nil { + options.GracefulShutdownTimeout = &f.GracefulShutdownTimeout + } + + return options +} diff --git a/internal/ansible/flags/flag_test.go b/internal/ansible/flags/flag_test.go new file mode 100644 index 0000000..bbbcf6d --- /dev/null +++ b/internal/ansible/flags/flag_test.go @@ -0,0 +1,72 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/spf13/pflag" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/flags" +) + +var _ = Describe("Flags", func() { + Describe("ToManagerOptions", func() { + var ( + f *flags.Flags + flagSet *pflag.FlagSet + options manager.Options + ) + BeforeEach(func() { + f = &flags.Flags{} + flagSet = pflag.NewFlagSet("test", pflag.ExitOnError) + f.AddTo(flagSet) + }) + + When("the flag is set", func() { + It("uses the flag value when corresponding option value is empty", func() { + expOptionValue := ":5678" + options.MetricsBindAddress = "" + parseArgs(flagSet, "--metrics-bind-address", expOptionValue) + Expect(f.ToManagerOptions(options).MetricsBindAddress).To(Equal(expOptionValue)) + }) + It("uses the flag value when corresponding option value is not empty", func() { + expOptionValue := ":5678" + options.MetricsBindAddress = ":1234" + parseArgs(flagSet, "--metrics-bind-address", expOptionValue) + Expect(f.ToManagerOptions(options).MetricsBindAddress).To(Equal(expOptionValue)) + }) + }) + When("the flag is not set", func() { + It("uses the default flag value when corresponding option value is empty", func() { + expOptionValue := ":8080" + options.MetricsBindAddress = "" + parseArgs(flagSet) + Expect(f.ToManagerOptions(options).MetricsBindAddress).To(Equal(expOptionValue)) + }) + It("uses the option value when corresponding option value is not empty", func() { + expOptionValue := 
":1234" + options.MetricsBindAddress = expOptionValue + parseArgs(flagSet) + Expect(f.ToManagerOptions(options).MetricsBindAddress).To(Equal(expOptionValue)) + }) + }) + }) +}) + +func parseArgs(fs *pflag.FlagSet, extraArgs ...string) { + Expect(fs.Parse(extraArgs)).To(Succeed()) +} diff --git a/internal/ansible/flags/suite_test.go b/internal/ansible/flags/suite_test.go new file mode 100644 index 0000000..8850cb7 --- /dev/null +++ b/internal/ansible/flags/suite_test.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestFlags(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Flags Suite") +} diff --git a/internal/ansible/handler/handler_suite_test.go b/internal/ansible/handler/handler_suite_test.go new file mode 100644 index 0000000..ae169bf --- /dev/null +++ b/internal/ansible/handler/handler_suite_test.go @@ -0,0 +1,36 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + "bytes" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var logBuffer bytes.Buffer + +func TestEventhandler(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Handler Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(&logBuffer), zap.UseDevMode(true))) +}) diff --git a/internal/ansible/handler/logging_enqueue_annotation.go b/internal/ansible/handler/logging_enqueue_annotation.go new file mode 100644 index 0000000..5728119 --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_annotation.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package handler + +import ( + "strings" + + "github.com/operator-framework/operator-lib/handler" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// LoggingEnqueueRequestForAnnotation wraps operator-lib handler for +// "InstrumentedEnqueueRequestForObject", and logs the events as they occur +// +// &handler.LoggingEnqueueRequestForAnnotation{} +type LoggingEnqueueRequestForAnnotation struct { + handler.EnqueueRequestForAnnotation +} + +// Create implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForAnnotation) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Create", e.Object, nil) + h.EnqueueRequestForAnnotation.Create(e, q) +} + +// Update implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForAnnotation) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Update", e.ObjectOld, e.ObjectNew) + h.EnqueueRequestForAnnotation.Update(e, q) +} + +// Delete implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForAnnotation) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Delete", e.Object, nil) + h.EnqueueRequestForAnnotation.Delete(e, q) +} + +// Generic implements EventHandler, and emits a log message. 
+func (h LoggingEnqueueRequestForAnnotation) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Generic", e.Object, nil) + h.EnqueueRequestForAnnotation.Generic(e, q) +} + +func (h LoggingEnqueueRequestForAnnotation) logEvent(eventType string, object, newObject client.Object) { + typeString, name, namespace := extractTypedOwnerAnnotations(h.EnqueueRequestForAnnotation.Type, object) + if newObject != nil && typeString == "" { + typeString, name, namespace = extractTypedOwnerAnnotations(h.EnqueueRequestForAnnotation.Type, newObject) + } + + if name != "" && typeString != "" { + kvs := []interface{}{ + "Event type", eventType, + "GroupVersionKind", object.GetObjectKind().GroupVersionKind().String(), + "Name", object.GetName(), + } + if objectNs := object.GetNamespace(); objectNs != "" { + kvs = append(kvs, "Namespace", objectNs) + } + + kvs = append(kvs, + "Owner GroupKind", typeString, + "Owner Name", name, + ) + if namespace != "" { + kvs = append(kvs, "Owner Namespace", namespace) + } + + log.V(1).Info("Annotation handler event", kvs...) + } +} + +func extractTypedOwnerAnnotations(ownerGK schema.GroupKind, object metav1.Object) (string, string, string) { + annotations := object.GetAnnotations() + if len(annotations) == 0 { + return "", "", "" + } + if typeString, ok := annotations[handler.TypeAnnotation]; ok && typeString == ownerGK.String() { + if namespacedNameString, ok := annotations[handler.NamespacedNameAnnotation]; ok { + parsed := parseNamespacedName(namespacedNameString) + return typeString, parsed.Name, parsed.Namespace + } + } + return "", "", "" +} + +// parseNamespacedName parses the provided string to extract the namespace and name into a +// types.NamespacedName. The edge case of empty string is handled prior to calling this function. 
+func parseNamespacedName(namespacedNameString string) types.NamespacedName { + values := strings.SplitN(namespacedNameString, "/", 2) + + switch len(values) { + case 1: + return types.NamespacedName{Name: values[0]} + default: + return types.NamespacedName{Namespace: values[0], Name: values[1]} + } +} diff --git a/internal/ansible/handler/logging_enqueue_annotation_test.go b/internal/ansible/handler/logging_enqueue_annotation_test.go new file mode 100644 index 0000000..4d7944a --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_annotation_test.go @@ -0,0 +1,442 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/operator-framework/operator-lib/handler" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("LoggingEnqueueRequestForAnnotation", func() { + var q workqueue.RateLimitingInterface + var instance LoggingEnqueueRequestForAnnotation + var pod *corev1.Pod + var podOwner *corev1.Pod + + BeforeEach(func() { + q = controllertest.Queue{Interface: workqueue.New()} + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "biz", + Name: "biz", + }, + } + podOwner = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "podOwnerNs", + Name: "podOwnerName", + }, + } + + pod.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + podOwner.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + + Expect(handler.SetOwnerAnnotations(podOwner, pod)).To(Succeed()) + instance = LoggingEnqueueRequestForAnnotation{ + handler.EnqueueRequestForAnnotation{ + Type: schema.GroupKind{ + Group: "", + Kind: "Pod", + }}} + }) + + Describe("Create", func() { + It("should emit a log and enqueue a Request with the annotations of the object in case of CreateEvent", func() { + evt := event.CreateEvent{ + Object: pod, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + + It("should enqueue a Request to the owner 
resource when the annotations are applied in child object"+ + " in the Create Event", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + Expect(handler.SetOwnerAnnotations(podOwner, repl)).To(Succeed()) + + evt := event.CreateEvent{ + Object: repl, + } + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*apps/v1.*ReplicaSet.*faz.*foo.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + It("should not emit a log or enqueue a request if there are no annotations matching with the object", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + Expect(q.Len()).To(Equal(0)) + }) + It("should not emit a log or enqueue a Request if there is no Namespace and name annotation matching the specified object are found", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + Annotations: map[string]string{ + handler.TypeAnnotation: schema.GroupKind{Group: "", Kind: "Pod"}.String(), + }, + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + 
Expect(q.Len()).To(Equal(0)) + }) + It("should not emit a log or enqueue a Request if there is no TypeAnnotation matching the specified Group and Kind", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + + Annotations: map[string]string{ + handler.NamespacedNameAnnotation: "AppService", + }, + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + Expect(q.Len()).To(Equal(0)) + }) + It("should emit a log and enqueue a Request if there are no Namespace annotation matching the object", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + Annotations: map[string]string{ + handler.NamespacedNameAnnotation: "AppService", + handler.TypeAnnotation: schema.GroupKind{Group: "", Kind: "Pod"}.String(), + }, + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*apps/v1.*ReplicaSet.*faz.*foo.*Pod.*AppService`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "", Name: "AppService"}})) + }) + It("should emit a log and enqueue a Request for an object that is cluster scoped which has the annotations", func() { + nd := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + handler.NamespacedNameAnnotation: "myapp", + handler.TypeAnnotation: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}.String(), + }, + }, + } + nd.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: 
"v1", Kind: "Node"}) + + instance = LoggingEnqueueRequestForAnnotation{handler.EnqueueRequestForAnnotation{Type: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}}} + + evt := event.CreateEvent{ + Object: nd, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*/v1.*Node.*node-1.*ReplicaSet.apps.*myapp.*`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "", Name: "myapp"}})) + }) + It("should not emit a log or enqueue a Request for an object that is cluster scoped which does not have annotations", func() { + nd := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, + } + nd.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}) + + instance = LoggingEnqueueRequestForAnnotation{handler.EnqueueRequestForAnnotation{Type: nd.GetObjectKind().GroupVersionKind().GroupKind()}} + evt := event.CreateEvent{ + Object: nd, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + Expect(q.Len()).To(Equal(0)) + }) + }) + + Describe("Delete", func() { + It("should emit a log and enqueue a Request with the annotations of the object in case of DeleteEvent", func() { + evt := event.DeleteEvent{ + Object: pod, + } + logBuffer.Reset() + instance.Delete(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Delete.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + }) + + Describe("Update", func() { + It("should emit a log and enqueue a Request with annotations applied to both objects in UpdateEvent", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + 
newPod.Namespace = pod.Namespace + "2" + + Expect(handler.SetOwnerAnnotations(podOwner, pod)).To(Succeed()) + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + It("should emit a log and enqueue a Request with the annotations applied in one of the objects in case of UpdateEvent", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + newPod.Annotations = map[string]string{} + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + It("should emit a log and enqueue a Request when the annotations are applied in a different resource in case of UpdateEvent", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + instance = LoggingEnqueueRequestForAnnotation{ + handler.EnqueueRequestForAnnotation{ + Type: schema.GroupKind{ + Group: "apps", + Kind: "ReplicaSet", + }}} + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + Expect(q.Len()).To(Equal(0)) 
+ + newRepl := repl.DeepCopy() + newRepl.Name = pod.Name + "2" + newRepl.Namespace = pod.Namespace + "2" + + newRepl.Annotations = map[string]string{ + handler.TypeAnnotation: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}.String(), + handler.NamespacedNameAnnotation: "foo/faz", + } + + instance2 := LoggingEnqueueRequestForAnnotation{ + handler.EnqueueRequestForAnnotation{ + Type: schema.GroupKind{ + Group: "apps", + Kind: "ReplicaSet", + }}} + + evt2 := event.UpdateEvent{ + ObjectOld: repl, + ObjectNew: newRepl, + } + + logBuffer.Reset() + instance2.Update(evt2, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*apps/v1.*ReplicaSet.*faz.*foo.*ReplicaSet.apps.*faz.*foo`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "foo", + Name: "faz", + }, + })) + }) + It("should emit a log and enqueue multiple Update Requests when different annotations are applied to multiple objects", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + + Expect(handler.SetOwnerAnnotations(podOwner, pod)).To(Succeed()) + + var podOwner2 = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "podOwnerNsTest", + Name: "podOwnerNameTest", + }, + } + podOwner2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Kind: "Pod"}) + + Expect(handler.SetOwnerAnnotations(podOwner2, newPod)).To(Succeed()) + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(2)) + }) + }) + + Describe("Generic", func() { + It("should enqueue a Request with the annotations of the object in case of GenericEvent", func() { + evt := event.GenericEvent{ + Object: pod, + } + logBuffer.Reset() + instance.Generic(evt, 
q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Generic.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName.*podOwnerNs`, + )) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: podOwner.Namespace, + Name: podOwner.Name, + }, + })) + }) + }) +}) diff --git a/internal/ansible/handler/logging_enqueue_object.go b/internal/ansible/handler/logging_enqueue_object.go new file mode 100644 index 0000000..0aeb8b4 --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_object.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package handler + +import ( + "github.com/operator-framework/operator-lib/handler" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("ansible").WithName("handler") + +// LoggingEnqueueRequestForObject wraps operator-lib handler for +// "InstrumentedEnqueueRequestForObject", and logs the events as they occur +// +// &handler.LoggingEnqueueRequestForObject{} +type LoggingEnqueueRequestForObject struct { + handler.InstrumentedEnqueueRequestForObject +} + +// Create implements EventHandler, and emits a log message. 
+func (h LoggingEnqueueRequestForObject) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Create", e.Object) + h.InstrumentedEnqueueRequestForObject.Create(e, q) +} + +// Update implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForObject) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Update", e.ObjectOld) + h.InstrumentedEnqueueRequestForObject.Update(e, q) +} + +// Delete implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForObject) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Delete", e.Object) + h.InstrumentedEnqueueRequestForObject.Delete(e, q) +} + +// Generic implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForObject) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Generic", e.Object) + h.EnqueueRequestForObject.Generic(e, q) +} + +func (h LoggingEnqueueRequestForObject) logEvent(eventType string, object client.Object) { + kvs := []interface{}{ + "Event type", eventType, + "GroupVersionKind", object.GetObjectKind().GroupVersionKind().String(), + "Name", object.GetName(), + } + if objectNs := object.GetNamespace(); objectNs != "" { + kvs = append(kvs, "Namespace", objectNs) + } + + log.V(1).Info("Metrics handler event", kvs...) +} diff --git a/internal/ansible/handler/logging_enqueue_object_test.go b/internal/ansible/handler/logging_enqueue_object_test.go new file mode 100644 index 0000000..00420d6 --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_object_test.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + dto "github.com/prometheus/client_model/go" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("LoggingEnqueueRequestForObject", func() { + var q workqueue.RateLimitingInterface + var instance LoggingEnqueueRequestForObject + var pod *corev1.Pod + + BeforeEach(func() { + logBuffer.Reset() + q = controllertest.Queue{Interface: workqueue.New()} + instance = LoggingEnqueueRequestForObject{} + pod = &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "biznamespace", + Name: "bizname", + CreationTimestamp: metav1.Now(), + }, + } + }) + Describe("Create", func() { + It("should emit a log, enqueue a request & emit a metric on a CreateEvent", func() { + evt := event.CreateEvent{ + Object: pod, + } + + // test the create + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*/v1.*Pod.*bizname.*biznamespace`, + )) + + // verify workqueue + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: pod.Namespace, + Name: 
pod.Name, + }, + })) + + // verify metrics + gauges, err := metrics.Registry.Gather() + Expect(err).NotTo(HaveOccurred()) + Expect(gauges).To(HaveLen(1)) + assertMetrics(gauges[0], 1, []*corev1.Pod{pod}) + }) + }) + + Describe("Delete", func() { + Context("when a gauge already exists", func() { + BeforeEach(func() { + evt := event.CreateEvent{ + Object: pod, + } + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*/v1.*Pod.*bizname.*biznamespace`, + )) + Expect(q.Len()).To(Equal(1)) + }) + It("should emit a log, enqueue a request & remove the metric on a DeleteEvent", func() { + evt := event.DeleteEvent{ + Object: pod, + } + + logBuffer.Reset() + // test the delete + instance.Delete(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Delete.*/v1.*Pod.*bizname.*biznamespace`, + )) + + // verify workqueue + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: pod.Namespace, + Name: pod.Name, + }, + })) + + // verify metrics + gauges, err := metrics.Registry.Gather() + Expect(err).NotTo(HaveOccurred()) + Expect(gauges).To(BeEmpty()) + }) + }) + Context("when a gauge does not exist", func() { + It("should emit a log, enqueue a request & there should be no new metric on a DeleteEvent", func() { + evt := event.DeleteEvent{ + Object: pod, + } + + logBuffer.Reset() + // test the delete + instance.Delete(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Delete.*/v1.*Pod.*bizname.*biznamespace`, + )) + + // verify workqueue + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: pod.Namespace, + Name: pod.Name, + }, + })) + + // verify metrics + gauges, err := metrics.Registry.Gather() + Expect(err).NotTo(HaveOccurred()) + Expect(gauges).To(BeEmpty()) + }) + }) + + }) + + Describe("Update", func() 
{ + It("should emit a log and enqueue a request in case of UpdateEvent", func() { + newpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "baznamespace", + Name: "bazname", + }, + } + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newpod, + } + + logBuffer.Reset() + // test the update + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*bizname.*biznamespace`, + )) + + // verify workqueue + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: newpod.Namespace, + Name: newpod.Name, + }, + })) + + // verify metrics + gauges, err := metrics.Registry.Gather() + Expect(err).NotTo(HaveOccurred()) + Expect(gauges).To(HaveLen(1)) + assertMetrics(gauges[0], 2, []*corev1.Pod{newpod, pod}) + }) + }) +}) + +func assertMetrics(gauge *dto.MetricFamily, count int, pods []*corev1.Pod) { + Expect(gauge.Metric).To(HaveLen(count)) + for i := 0; i < count; i++ { + Expect(*gauge.Metric[i].Gauge.Value).To(Equal(float64(pods[i].GetObjectMeta().GetCreationTimestamp().UTC().Unix()))) + + for _, l := range gauge.Metric[i].Label { + if l.Name != nil { + switch *l.Name { + case "name": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectMeta().GetName()))) + case "namespace": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectMeta().GetNamespace()))) + case "group": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Group))) + case "version": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Version))) + case "kind": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Kind))) + } + } + } + } +} diff --git a/internal/ansible/handler/logging_enqueue_owner.go b/internal/ansible/handler/logging_enqueue_owner.go new file mode 100644 index 0000000..8331a10 --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_owner.go @@ -0,0 +1,97 @@ 
+// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package handler + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + crHandler "sigs.k8s.io/controller-runtime/pkg/handler" +) + +// LoggingEnqueueRequestForOwner wraps operator-lib handler for +// "InstrumentedEnqueueRequestForObject", and logs the events as they occur +// +// &handler.LoggingEnqueueRequestForOwner{} +type LoggingEnqueueRequestForOwner struct { + crHandler.EnqueueRequestForOwner +} + +// Create implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForOwner) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Create", e.Object, nil) + h.EnqueueRequestForOwner.Create(e, q) +} + +// Update implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForOwner) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Update", e.ObjectOld, e.ObjectNew) + h.EnqueueRequestForOwner.Update(e, q) +} + +// Delete implements EventHandler, and emits a log message. 
+func (h LoggingEnqueueRequestForOwner) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Delete", e.Object, nil) + h.EnqueueRequestForOwner.Delete(e, q) +} + +// Generic implements EventHandler, and emits a log message. +func (h LoggingEnqueueRequestForOwner) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + h.logEvent("Generic", e.Object, nil) + h.EnqueueRequestForOwner.Generic(e, q) +} + +func (h LoggingEnqueueRequestForOwner) logEvent(eventType string, object, newObject client.Object) { + ownerReference := extractTypedOwnerReference(h.EnqueueRequestForOwner.OwnerType.GetObjectKind().GroupVersionKind(), object.GetOwnerReferences()) + if ownerReference == nil && newObject != nil { + ownerReference = extractTypedOwnerReference(h.EnqueueRequestForOwner.OwnerType.GetObjectKind().GroupVersionKind(), newObject.GetOwnerReferences()) + } + + // If no ownerReference was found then it's probably not an event we care about + if ownerReference != nil { + kvs := []interface{}{ + "Event type", eventType, + "GroupVersionKind", object.GetObjectKind().GroupVersionKind().String(), + "Name", object.GetName(), + } + if objectNs := object.GetNamespace(); objectNs != "" { + kvs = append(kvs, "Namespace", objectNs) + } + kvs = append(kvs, + "Owner APIVersion", ownerReference.APIVersion, + "Owner Kind", ownerReference.Kind, + "Owner Name", ownerReference.Name, + ) + + log.V(1).Info("OwnerReference handler event", kvs...) 
+ } +} + +func extractTypedOwnerReference(ownerGVK schema.GroupVersionKind, ownerReferences []metav1.OwnerReference) *metav1.OwnerReference { + for _, ownerRef := range ownerReferences { + refGV, err := schema.ParseGroupVersion(ownerRef.APIVersion) + if err != nil { + log.Error(err, "Could not parse OwnerReference APIVersion", + "api version", ownerRef.APIVersion) + } + + if ownerGVK.Group == refGV.Group && + ownerGVK.Kind == ownerRef.Kind { + return &ownerRef + } + } + return nil +} diff --git a/internal/ansible/handler/logging_enqueue_owner_test.go b/internal/ansible/handler/logging_enqueue_owner_test.go new file mode 100644 index 0000000..d2f7331 --- /dev/null +++ b/internal/ansible/handler/logging_enqueue_owner_test.go @@ -0,0 +1,269 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + crHandler "sigs.k8s.io/controller-runtime/pkg/handler" + + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("LoggingEnqueueRequestForOwner", func() { + var q workqueue.RateLimitingInterface + var instance LoggingEnqueueRequestForOwner + var pod *corev1.Pod + var podOwner *metav1.OwnerReference + + BeforeEach(func() { + q = controllertest.Queue{Interface: workqueue.New()} + podOwner = &metav1.OwnerReference{ + Kind: "Pod", + APIVersion: "v1", + Name: "podOwnerName", + } + + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "biz", + Name: "biz", + OwnerReferences: []metav1.OwnerReference{*podOwner}, + }, + } + + pod.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + + instance = LoggingEnqueueRequestForOwner{ + crHandler.EnqueueRequestForOwner{ + OwnerType: pod, + }} + }) + + Describe("Create", func() { + It("should emit a log with the ownerReference of the object in case of CreateEvent", func() { + evt := event.CreateEvent{ + Object: pod, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Create.*/v1.*Pod.*biz.*biz.*v1.*Pod.*podOwnerName`, + )) + }) + + It("emit a log when the ownerReferences are applied in child object"+ + " in the Create Event", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + OwnerReferences: []metav1.OwnerReference{*podOwner}, + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + 
`ansible.handler.*Create.*apps/v1.*ReplicaSet.*faz.*foo.*Pod.*podOwnerName`, + )) + }) + It("should not emit a log or there are no ownerReferences matching with the object", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + Expect(q.Len()).To(Equal(0)) + }) + It("should not emit a log if the ownerReference does not match the OwnerType", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: "podOwnerName", + }}, + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + }) + + It("should not emit a log for an object which does not have ownerReferences", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + }) + }) + + Describe("Delete", func() { + It("should emit a log with the ownerReferenc of the object in case of DeleteEvent", func() { + evt := event.DeleteEvent{ + Object: pod, + } + logBuffer.Reset() + instance.Delete(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + 
`ansible.handler.*Delete.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName`, + )) + }) + }) + + Describe("Update", func() { + It("should emit a log and enqueue a Request with annotations applied to both objects in UpdateEvent", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName`, + )) + }) + It("should emit a log with the ownerReferences applied in one of the objects in case of UpdateEvent", func() { + noOwnerPod := pod.DeepCopy() + noOwnerPod.Name = pod.Name + "2" + noOwnerPod.Namespace = pod.Namespace + "2" + noOwnerPod.OwnerReferences = []metav1.OwnerReference{} + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: noOwnerPod, + } + + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName`, + )) + + evt = event.UpdateEvent{ + ObjectOld: noOwnerPod, + ObjectNew: pod, + } + + logBuffer.Reset() + instance.Update(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName`, + )) + }) + It("should emit a log when the OwnerReference is applied after creation in case of UpdateEvent", func() { + repl := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "faz", + }, + } + repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) + + instance = LoggingEnqueueRequestForOwner{ + crHandler.EnqueueRequestForOwner{ + OwnerType: repl, + }} + + evt := event.CreateEvent{ + Object: repl, + } + + logBuffer.Reset() + instance.Create(evt, q) + Expect(logBuffer.String()).To(Not(ContainSubstring("ansible.handler"))) + + newRepl := repl.DeepCopy() + newRepl.Name = pod.Name + "2" + 
newRepl.Namespace = pod.Namespace + "2" + + newRepl.OwnerReferences = []metav1.OwnerReference{{ + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "faz", + }} + + evt2 := event.UpdateEvent{ + ObjectOld: repl, + ObjectNew: newRepl, + } + + logBuffer.Reset() + instance.Update(evt2, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Update.*apps/v1.*ReplicaSet.*faz.*foo.*apps/v1.*ReplicaSet.*faz`, + )) + }) + }) + + Describe("Generic", func() { + It("should emit a log with the OwnerReference of the object in case of GenericEvent", func() { + evt := event.GenericEvent{ + Object: pod, + } + logBuffer.Reset() + instance.Generic(evt, q) + Expect(logBuffer.String()).To(MatchRegexp( + `ansible.handler.*Generic.*/v1.*Pod.*biz.*biz.*Pod.*podOwnerName`, + )) + }) + }) +}) diff --git a/internal/ansible/metrics/metrics.go b/internal/ansible/metrics/metrics.go new file mode 100644 index 0000000..ba808a5 --- /dev/null +++ b/internal/ansible/metrics/metrics.go @@ -0,0 +1,263 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metrics + +import ( + "errors" + "fmt" + + "github.com/prometheus/client_golang/prometheus" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" + + sdkVersion "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +const ( + subsystem = "ansible_operator" +) + +var ( + buildInfo = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: subsystem, + Name: "build_info", + Help: "Build information for the ansible-operator binary", + ConstLabels: map[string]string{ + "commit": sdkVersion.GitCommit, + "version": sdkVersion.Version, + }, + }) + + reconcileResults = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: subsystem, + Name: "reconcile_result", + Help: "Gauge of reconciles and their results.", + }, + []string{ + "GVK", + "result", + }) + + reconciles = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: subsystem, + Name: "reconciles", + Help: "How long in seconds a reconcile takes.", + }, + []string{ + "GVK", + }) + + userMetrics = map[string]prometheus.Collector{} +) + +func init() { + metrics.Registry.MustRegister(reconcileResults) + metrics.Registry.MustRegister(reconciles) +} + +// We will never want to panic our app because of metric saving. +// Therefore, we will recover our panics here and error log them +// for later diagnosis but will never fail the app. 
+func recoverMetricPanic() { + if r := recover(); r != nil { + logf.Log.WithName("metrics").Error(fmt.Errorf("%v", r), + "Recovering from metric function") + } +} + +func RegisterBuildInfo(r prometheus.Registerer) { + buildInfo.Set(1) + r.MustRegister(buildInfo) +} + +type UserMetric struct { + Name string `json:"name" yaml:"name"` + Help string `json:"description" yaml:"description"` + Counter *UserMetricCounter `json:"counter,omitempty" yaml:"counter,omitempty"` + Gauge *UserMetricGauge `json:"gauge,omitempty" yaml:"gauge,omitempty"` + Histogram *UserMetricHistogram `json:"histogram,omitempty" yaml:"histogram,omitempty"` + Summary *UserMetricSummary `json:"summary,omitempty" yaml:"summary,omitempty"` +} + +type UserMetricCounter struct { + Inc bool `json:"increment,omitempty" yaml:"increment,omitempty"` + Add float64 `json:"add,omitempty" yaml:"add,omitempty"` +} + +type UserMetricGauge struct { + Set float64 `json:"set,omitempty" yaml:"set,omitempty"` + Inc bool `json:"increment,omitempty" yaml:"increment,omitempty"` + Dec bool `json:"decrement,omitempty" yaml:"decrement,omitempty"` + SetToCurrentTime bool `json:"set_to_current_time,omitempty" yaml:"set_to_current_time,omitempty"` + Add float64 `json:"add,omitempty" yaml:"add,omitempty"` + Sub float64 `json:"subtract,omitempty" yaml:"subtract,omitempty"` +} + +type UserMetricHistogram struct { + Observe float64 `json:"observe,omitempty" yaml:"observe,omitempty"` +} + +type UserMetricSummary struct { + Observe float64 `json:"observe,omitempty" yaml:"observe,omitempty"` +} + +func validateMetricSpec(metricSpec UserMetric) error { + var metricConfigs int + if metricSpec.Counter != nil { + metricConfigs++ + } + if metricSpec.Gauge != nil { + metricConfigs++ + } + if metricSpec.Summary != nil { + metricConfigs++ + } + if metricSpec.Histogram != nil { + metricConfigs++ + } + if metricConfigs > 1 { + return errors.New("only one metric can be processed at a time") + } else if metricConfigs == 0 { + return errors.New("a 
request should contain at least one metric") + } + return nil +} + +func handleCounter(metricSpec UserMetric, counter prometheus.Counter) error { + if metricSpec.Counter == nil { + return fmt.Errorf("cannot change metric type of %s, which is a counter", metricSpec.Name) + } + if metricSpec.Counter.Inc { + counter.Inc() + } else if metricSpec.Counter.Add != 0.0 { + if metricSpec.Counter.Add < 0 { + return errors.New("counter metrics cannot decrease in value") + } + counter.Add(metricSpec.Counter.Add) + } + return nil +} + +func handleGauge(metricSpec UserMetric, gauge prometheus.Gauge) error { + if metricSpec.Gauge == nil { + return fmt.Errorf("cannot change metric type of %s, which is a gauge", metricSpec.Name) + } + if metricSpec.Gauge.Inc { + gauge.Inc() + } else if metricSpec.Gauge.Dec { + gauge.Dec() + } else if metricSpec.Gauge.Add != 0.0 { + gauge.Add(metricSpec.Gauge.Add) + } else if metricSpec.Gauge.Sub != 0.0 { + gauge.Sub(metricSpec.Gauge.Sub) + } else if metricSpec.Gauge.Set != 0.0 { + gauge.Set(metricSpec.Gauge.Set) + } else if metricSpec.Gauge.SetToCurrentTime { + gauge.SetToCurrentTime() + } + return nil +} + +func handleSummaryOrHistogram(metricSpec UserMetric, summary prometheus.Summary) error { + if metricSpec.Histogram == nil && metricSpec.Summary == nil { + return fmt.Errorf("cannot change metric type of %s, which is a histogram or summary", metricSpec.Name) + } + if metricSpec.Histogram != nil { + summary.Observe(metricSpec.Histogram.Observe) + } else if metricSpec.Summary != nil { + summary.Observe(metricSpec.Summary.Observe) + } + return nil +} + +func ensureMetric(r prometheus.Registerer, metricSpec UserMetric) { + if _, ok := userMetrics[metricSpec.Name]; !ok { + // This is the first time we've seen this metric + logf.Log.WithName("metrics").Info("Registering", "metric", metricSpec.Name) + if metricSpec.Counter != nil { + userMetrics[metricSpec.Name] = prometheus.NewCounter(prometheus.CounterOpts{ + Name: metricSpec.Name, + Help: 
metricSpec.Help, + }) + } + if metricSpec.Gauge != nil { + userMetrics[metricSpec.Name] = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: metricSpec.Name, + Help: metricSpec.Help, + }) + } + if metricSpec.Histogram != nil { + userMetrics[metricSpec.Name] = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: metricSpec.Name, + Help: metricSpec.Help, + }) + } + if metricSpec.Summary != nil { + userMetrics[metricSpec.Name] = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: metricSpec.Name, + Help: metricSpec.Help, + }) + } + if err := r.Register(userMetrics[metricSpec.Name]); err != nil { + logf.Log.WithName("metrics").Info("Unable to register %s metric with prometheus.", metricSpec.Name) + } + } +} + +func HandleUserMetric(r prometheus.Registerer, metricSpec UserMetric) error { + if err := validateMetricSpec(metricSpec); err != nil { + return err + } + ensureMetric(r, metricSpec) + collector := userMetrics[metricSpec.Name] + switch v := collector.(type) { + // Gauge must be first, because a Counter is a Gauge, but a Gauge is not a Counter. + case prometheus.Gauge: + if err := handleGauge(metricSpec, v); err != nil { + return err + } + case prometheus.Counter: + if err := handleCounter(metricSpec, v); err != nil { + return err + } + // Histogram and Summary interfaces are identical, so we accept either case. 
+ case prometheus.Histogram: + if err := handleSummaryOrHistogram(metricSpec, v); err != nil { + return err + } + } + return nil +} + +func ReconcileSucceeded(gvk string) { + defer recoverMetricPanic() + reconcileResults.WithLabelValues(gvk, "succeeded").Inc() +} + +func ReconcileFailed(gvk string) { + // TODO: consider taking in a failure reason + defer recoverMetricPanic() + reconcileResults.WithLabelValues(gvk, "failed").Inc() +} + +func ReconcileTimer(gvk string) *prometheus.Timer { + defer recoverMetricPanic() + return prometheus.NewTimer(prometheus.ObserverFunc(func(duration float64) { + reconciles.WithLabelValues(gvk).Observe(duration) + })) +} diff --git a/internal/ansible/paramconv/paramconv.go b/internal/ansible/paramconv/paramconv.go new file mode 100644 index 0000000..4e28383 --- /dev/null +++ b/internal/ansible/paramconv/paramconv.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Based on https://github.com/iancoleman/strcase + +package paramconv + +import ( + "regexp" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +var ( + numberSequence = regexp.MustCompile(`([a-zA-Z])(\d+)([a-zA-Z](\d+))`) + numberReplacement = []byte(`$1 $2 $3`) + wordMapping = map[string]string{ + "http": "HTTP", + "url": "URL", + "ip": "IP", + } +) + +func addWordBoundariesToNumbers(s string) string { + b := []byte(s) + b = numberSequence.ReplaceAll(b, numberReplacement) + return string(b) +} + +func translateWord(word string, initCase bool) string { + if val, ok := wordMapping[word]; ok { + return val + } + if initCase { + caser := cases.Title(language.AmericanEnglish) + return caser.String(word) + } + return word +} + +// Converts a string to CamelCase +func ToCamel(s string) string { + s = addWordBoundariesToNumbers(s) + s = strings.Trim(s, " ") + n := "" + bits := []string{} + for _, v := range s { + if v == '_' || v == ' ' || v == '-' { + bits = append(bits, n) + n = "" + } else { + n += string(v) + } + } + bits = append(bits, n) + + ret := "" + for i, substr := range bits { + ret += translateWord(substr, i != 0) + } + return ret +} + +// preprocessWordMapping() will check if value contains special words mapped or its plural in +// wordMapping, then processes it such that ToSnake() can convert it to snake case. +// If value contains special word, the character "_" is appended as a prefix and postfix +// to the special word found. For example, if the input string is "egressIP", +// which contains is a special word "IP", the function will return "egress_IP". +// If the last character of the special word is an "s" (i.e plural of the word +// found in wordMapping), it is considered a part of that word and will be capitalized. +func preprocessWordMapping(value string) string { + + for _, word := range wordMapping { + idx := strings.Index(value, word) + if idx >= 0 { + // The special non-plural word appears at the end of the string. 
+ if (idx + len(word) - 1) == len(value)-1 { + value = value[:idx] + "_" + value[idx:] + } else if value[idx+len(word)] == 's' { + // The special plural word occurs at the end, start, or somewhere in the middle of value. + if idx+len(word) == len(value)-1 { + value = value[:idx] + "_" + value[idx:(idx+len(word))] + "S" + } else if idx == 0 { + value = value[:(idx+len(word))] + "S" + "_" + value[(idx+len(word)+1):] + } else { + value = value[:idx] + "_" + value[idx:(idx+len(word))] + "S" + "_" + value[(idx+len(word)+1):] + } + } else if idx == 0 { + // The special non-plural word occurs at the start or somewhere in the middle of value. + value = value[:(idx+len(word))] + "_" + value[(idx+len(word)):] + } else { + value = value[:idx] + "_" + value[idx:(idx+len(word))] + "_" + value[(idx+len(word)):] + } + } + } + + return value +} + +// Converts a string to snake_case +func ToSnake(s string) string { + s = addWordBoundariesToNumbers(s) + s = strings.Trim(s, " ") + var prefix string + char1 := []rune(s)[0] + if char1 >= 'A' && char1 <= 'Z' { + prefix = "_" + } else { + prefix = "" + } + bits := []string{} + n := "" + iReal := -1 + + // append underscore (_) as prefix and postfix to isolate special words defined in the wordMapping + s = preprocessWordMapping(s) + + for i, v := range s { + iReal++ + // treat acronyms as words, eg for JSONData -> JSON is a whole word + nextCaseIsChanged := false + if i+1 < len(s) { + next := s[i+1] + if (v >= 'A' && v <= 'Z' && next >= 'a' && next <= 'z') || (v >= 'a' && v <= 'z' && next >= 'A' && next <= 'Z') { + nextCaseIsChanged = true + } + } + + if iReal > 0 && n[len(n)-1] != '_' && nextCaseIsChanged { + // add underscore if next letter case type is changed + if v >= 'A' && v <= 'Z' { + bits = append(bits, strings.ToLower(n)) + n = string(v) + iReal = 0 + } else if v >= 'a' && v <= 'z' { + bits = append(bits, strings.ToLower(n+string(v))) + n = "" + iReal = -1 + } + } else if v == ' ' || v == '_' || v == '-' { + // replace 
spaces/underscores with delimiters + bits = append(bits, strings.ToLower(n)) + n = "" + iReal = -1 + } else { + n = n + string(v) + } + } + bits = append(bits, strings.ToLower(n)) + joined := strings.Join(bits, "_") + + // prepending an underscore (_) if the word begins with a Capital Letter + if _, ok := wordMapping[bits[0]]; !ok { + return prefix + joined + } + return joined +} + +func convertParameter(fn func(string) string, v interface{}) interface{} { + switch v := v.(type) { + case map[string]interface{}: + ret := map[string]interface{}{} + for key, val := range v { + ret[fn(key)] = convertParameter(fn, val) + } + return ret + case []interface{}: + return convertArray(fn, v) + default: + return v + } +} + +func convertArray(fn func(string) string, in []interface{}) []interface{} { + res := make([]interface{}, len(in)) + for i, v := range in { + res[i] = convertParameter(fn, v) + } + return res +} + +func convertMapKeys(fn func(string) string, in map[string]interface{}) map[string]interface{} { + converted := map[string]interface{}{} + for key, val := range in { + converted[fn(key)] = convertParameter(fn, val) + } + return converted +} + +func MapToSnake(in map[string]interface{}) map[string]interface{} { + return convertMapKeys(ToSnake, in) +} + +func MapToCamel(in map[string]interface{}) map[string]interface{} { + return convertMapKeys(ToCamel, in) +} diff --git a/internal/ansible/paramconv/paramconv_test.go b/internal/ansible/paramconv/paramconv_test.go new file mode 100644 index 0000000..feac3a0 --- /dev/null +++ b/internal/ansible/paramconv/paramconv_test.go @@ -0,0 +1,242 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Based on https://github.com/iancoleman/strcase + +package paramconv + +import ( + "reflect" + "testing" +) + +func TestMapToCamel(t *testing.T) { + type args struct { + in map[string]interface{} + } + tests := []struct { + name string + args args + want map[string]interface{} + }{ + { + name: "should convert the Map to Camel", + args: args{map[string]interface{}{ + "var": "value", + "appService": "value", + "app_8sk_": "value", + "_app_8sk_test": "value", + }}, + want: map[string]interface{}{ + "var": "value", + "appService": "value", + "app8Sk": "value", + "App8SkTest": "value", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := MapToCamel(tt.args.in); !reflect.DeepEqual(got, tt.want) { + t.Errorf("MapToCamel() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMapToSnake(t *testing.T) { + type args struct { + in map[string]interface{} + } + tests := []struct { + name string + args args + want map[string]interface{} + }{ + { + name: "should convert the Map to Snake", + args: args{map[string]interface{}{ + "var": "value", + "var_var": "value", + "size_k8s_test": "value", + "888": "value", + }}, + want: map[string]interface{}{ + "var": "value", + "var_var": "value", + "size_k8s_test": "value", + "888": "value", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := MapToSnake(tt.args.in); !reflect.DeepEqual(got, tt.want) { + t.Errorf("MapToSnake() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestToCamel(t *testing.T) { + type args struct { + s 
string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "should convert to Camel", + args: args{"app_test"}, + want: "appTest", + }, + { + name: "should convert to Camel when start with _", + args: args{"_app_test"}, + want: "AppTest", + }, + { + name: "should convert to Camel when has numbers", + args: args{"_app_test_k8s"}, + want: "AppTestK8s", + }, { + name: "should convert to Camel when has numbers and _", + args: args{"var_k8s"}, + want: "varK8s", + }, + { + name: "should handle special words", + args: args{"egressIPs"}, + want: "egressIPs", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ToCamel(tt.args.s); got != tt.want { + t.Errorf("ToCamel() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestToSnake(t *testing.T) { + type args struct { + s string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "should keep the same", + args: args{"var"}, + want: "var", + }, + { + name: "should convert to Snake when is only numbers", + args: args{"888"}, + want: "888", + }, + { + name: "should convert to Snake when has numbers and _", + args: args{"k8s_var"}, + want: "k8s_var", + }, + { + name: "should convert to Snake when start with _", + args: args{"_k8s_var"}, + want: "_k8s_var", + }, + { + name: "should convert to Snake and replace the space for _", + args: args{"k8s var"}, + want: "k8s_var", + }, + { + name: "should handle Camel and add _ prefix when starts with", + args: args{"ThisShouldHaveUnderscores"}, + want: "_this_should_have_underscores", + }, + { + name: "should convert to snake when has Camel and numbers", + args: args{"sizeK8sBuckets"}, + want: "size_k8s_buckets", + }, + { + name: "should be able to handle mixed vars", + args: args{"_CanYou_Handle_mixedVars"}, + want: "_can_you_handle_mixed_vars", + }, + { + name: "should be a noop", + args: args{"this_should_be_a_noop"}, + want: "this_should_be_a_noop", + }, + { + name: "should handle 
special plural word at end", + args: args{"egressIPs"}, + want: "egress_ips", + }, + { + name: "should handle special plural word in middle", + args: args{"egressIPsEgress"}, + want: "egress_ips_egress", + }, + { + name: "should handle special plural word in middle followed by lowercase letter", + args: args{"egressIPsegress"}, + want: "egress_ips_egress", + }, + { + name: "should handle special plural word at the start", + args: args{"IPsegress"}, + want: "_ips_egress", + }, + { + name: "should handle special word at the end", + args: args{"egressIP"}, + want: "egress_ip", + }, + { + name: "should handle special word in the middle", + args: args{"egressIPEgress"}, + want: "egress_ip_egress", + }, + { + name: "should handle special word in the middle followed by lowercase", + args: args{"egressIPegress"}, + want: "egress_ip_egress", + }, + { + name: "should handle multiple special words", + args: args{"URLegressIPEgressHTTP"}, + want: "url_egress_ip_egress_http", + }, + { + name: "should handle multiple plural special words", + args: args{"URLsegressIPsEgressHTTPs"}, + want: "_urls_egress_ips_egress_https", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ToSnake(tt.args.s); got != tt.want { + t.Errorf("ToSnake() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/ansible/proxy/cache_response.go b/internal/ansible/proxy/cache_response.go new file mode 100644 index 0000000..098b0b2 --- /dev/null +++ b/internal/ansible/proxy/cache_response.go @@ -0,0 +1,305 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "regexp" + "strings" + + libhandler "github.com/operator-framework/operator-lib/handler" + "k8s.io/apimachinery/pkg/api/meta" + metainternalscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/set" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/controllermap" + k8sRequest "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/requestfactory" +) + +type marshaler interface { + MarshalJSON() ([]byte, error) +} + +type cacheResponseHandler struct { + next http.Handler + informerCache cache.Cache + restMapper meta.RESTMapper + watchedNamespaces map[string]interface{} + cMap *controllermap.ControllerMap + injectOwnerRef bool + apiResources *apiResources + skipPathRegexp []*regexp.Regexp +} + +func (c *cacheResponseHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case http.MethodGet: + // GET request means we need to check the cache + rf := k8sRequest.RequestInfoFactory{APIPrefixes: set.New("api", "apis"), + GrouplessAPIPrefixes: set.New("api")} + r, err := rf.NewRequestInfo(req) + if err != nil { + log.Error(err, "Failed to convert 
request") + break + } + + // Skip cache for non-cacheable requests, not a part of skipCacheLookup for performance. + if !r.IsResourceRequest || !(r.Subresource == "" || r.Subresource == "status") { + log.V(2).Info("Skipping cache lookup", "resource", r) + break + } + + if c.restMapper == nil { + c.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{schema.GroupVersion{ + Group: r.APIGroup, + Version: r.APIVersion, + }}) + } + k, err := getGVKFromRequestInfo(r, c.restMapper) + if err != nil { + // break here in case resource doesn't exist in cache + log.Error(err, "Cache miss, can not find in rest mapper") + break + } + + if c.skipCacheLookup(r, k, req) { + log.V(2).Info("Skipping cache lookup", "resource", r) + break + } + + // Determine if the resource is virtual. If it is then we should not attempt to use cache + isVR, err := c.apiResources.IsVirtualResource(k) + if err != nil { + // break here in case we can not understand if virtual resource or not + log.Error(err, "Unable to determine if virtual resource", "gvk", k) + break + } + + if isVR { + log.V(2).Info("Virtual resource, must ask the cluster API", "gvk", k) + break + } + + var m marshaler + + log.V(2).Info("Get resource in our cache", "r", r) + if r.Verb == "list" { + m, err = c.getListFromCache(r, req, k) + if err != nil { + break + } + } else { + m, err = c.getObjectFromCache(r, req, k) + if err != nil { + break + } + } + + i := bytes.Buffer{} + resp, err := m.MarshalJSON() + if err != nil { + // return will give a 500 + log.Error(err, "Failed to marshal data") + http.Error(w, "", http.StatusInternalServerError) + return + } + + // Set Content-Type header + w.Header().Set("Content-Type", "application/json") + // Set X-Cache header to signal that response is served from Cache + w.Header().Set("X-Cache", "HIT") + if err := json.Indent(&i, resp, "", " "); err != nil { + log.Error(err, "Failed to indent json") + } + _, err = w.Write(i.Bytes()) + if err != nil { + log.Error(err, "Failed to write 
response") + http.Error(w, "", http.StatusInternalServerError) + return + } + + // Return so that request isn't passed along to APIserver + log.Info("Read object from cache", "resource", r) + return + } + c.next.ServeHTTP(w, req) +} + +// skipCacheLookup - determine if we should skip the cache lookup +func (c *cacheResponseHandler) skipCacheLookup(r *k8sRequest.RequestInfo, gvk schema.GroupVersionKind, + req *http.Request) bool { + + skip := matchesRegexp(req.URL.String(), c.skipPathRegexp) + if skip { + return true + } + + owner, err := getRequestOwnerRef(req) + if err != nil { + log.Error(err, "Could not get owner reference from proxy.") + return false + } + if owner != nil { + ownerGV, err := schema.ParseGroupVersion(owner.APIVersion) + if err != nil { + m := fmt.Sprintf("Could not get group version for: %v.", owner) + log.Error(err, m) + return false + } + ownerGVK := schema.GroupVersionKind{ + Group: ownerGV.Group, + Version: ownerGV.Version, + Kind: owner.Kind, + } + + relatedController, ok := c.cMap.Get(ownerGVK) + if !ok { + log.Info("Could not find controller for gvk.", "ownerGVK:", ownerGVK) + return false + } + if relatedController.Blacklist[gvk] { + log.Info("Skipping, because gvk is blacklisted", "GVK", gvk) + return true + } + } + // check if resource doesn't exist in watched namespaces + // if watchedNamespaces[""] exists then we are watching all namespaces + // and want to continue + _, allNsPresent := c.watchedNamespaces[metav1.NamespaceAll] + _, reqNsPresent := c.watchedNamespaces[r.Namespace] + if !allNsPresent && !reqNsPresent { + return true + } + + if strings.HasPrefix(r.Path, "/version") { + // Temporarily pass along to API server + // Ideally we cache this response as well + return true + } + + return false +} + +func (c *cacheResponseHandler) recoverDependentWatches(req *http.Request, un *unstructured.Unstructured) { + ownerRef, err := getRequestOwnerRef(req) + if err != nil { + log.Error(err, "Could not get ownerRef from proxy") + return + 
} + // This happens when a request unrelated to reconciliation hits the proxy + if ownerRef == nil { + return + } + + for _, oRef := range un.GetOwnerReferences() { + if oRef.APIVersion == ownerRef.APIVersion && oRef.Kind == ownerRef.Kind { + err := addWatchToController(*ownerRef, c.cMap, un, c.restMapper, true) + if err != nil { + log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef) + return + } + } + } + if typeString, ok := un.GetAnnotations()[libhandler.TypeAnnotation]; ok { + ownerGV, err := schema.ParseGroupVersion(ownerRef.APIVersion) + if err != nil { + m := fmt.Sprintf("could not get group version for: %v", ownerGV) + log.Error(err, m) + return + } + if typeString == fmt.Sprintf("%v.%v", ownerRef.Kind, ownerGV.Group) { + err := addWatchToController(*ownerRef, c.cMap, un, c.restMapper, false) + if err != nil { + log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef) + return + } + } + } +} + +func (c *cacheResponseHandler) getListFromCache(r *k8sRequest.RequestInfo, req *http.Request, + k schema.GroupVersionKind) (marshaler, error) { + k8sListOpts := &metav1.ListOptions{} + if err := metainternalscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, + k8sListOpts); err != nil { + log.Error(err, "Unable to decode list options from request") + return nil, err + } + clientListOpts := []client.ListOption{ + client.InNamespace(r.Namespace), + } + if k8sListOpts.LabelSelector != "" { + sel, err := labels.ConvertSelectorToLabelsMap(k8sListOpts.LabelSelector) + if err != nil { + log.Error(err, "Unable to convert label selectors for the client") + return nil, err + } + clientListOpts = append(clientListOpts, client.MatchingLabels(sel)) + } + if k8sListOpts.FieldSelector != "" { + sel, err := fields.ParseSelector(k8sListOpts.FieldSelector) + if err != nil { + log.Error(err, "Unable to parse field selectors for the client") + return nil, err + } + clientListOpts = append(clientListOpts, 
client.MatchingFieldsSelector{Selector: sel}) + } + k.Kind = k.Kind + "List" + un := unstructured.UnstructuredList{} + un.SetGroupVersionKind(k) + ctx, cancel := context.WithTimeout(context.Background(), cacheEstablishmentTimeout) + defer cancel() + err := c.informerCache.List(ctx, &un, clientListOpts...) + if err != nil { + // break here in case resource doesn't exist in cache but exists on APIserver + // This is very unlikely but provides user with expected 404 + log.Info(fmt.Sprintf("cache miss: %v err-%v", k, err)) + return nil, err + } + return &un, nil +} + +func (c *cacheResponseHandler) getObjectFromCache(r *k8sRequest.RequestInfo, req *http.Request, + k schema.GroupVersionKind) (marshaler, error) { + un := &unstructured.Unstructured{} + un.SetGroupVersionKind(k) + obj := client.ObjectKey{Namespace: r.Namespace, Name: r.Name} + ctx, cancel := context.WithTimeout(context.Background(), cacheEstablishmentTimeout) + defer cancel() + err := c.informerCache.Get(ctx, obj, un) + if err != nil { + // break here in case resource doesn't exist in cache but exists on APIserver + // This is very unlikely but provides user with expected 404 + log.Info(fmt.Sprintf("Cache miss: %v, %v", k, obj)) + return nil, err + } + // Once we get the resource, we are going to attempt to recover the dependent watches here, + // This will happen in the background, and log errors. + if c.injectOwnerRef { + go c.recoverDependentWatches(req, un) + } + return un, nil +} diff --git a/internal/ansible/proxy/controllermap/controllermap.go b/internal/ansible/proxy/controllermap/controllermap.go new file mode 100644 index 0000000..fe2380b --- /dev/null +++ b/internal/ansible/proxy/controllermap/controllermap.go @@ -0,0 +1,111 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllermap + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// ControllerMap - map of GVK to ControllerMapContents +type ControllerMap struct { + mutex sync.RWMutex + internal map[schema.GroupVersionKind]*Contents +} + +// WatchMap - map of GVK to interface. Determines if resource is being watched already +type WatchMap struct { + mutex sync.RWMutex + internal map[schema.GroupVersionKind]interface{} +} + +// Contents - Contains internal data associated with each controller +type Contents struct { + Controller controller.Controller + WatchDependentResources bool + WatchClusterScopedResources bool + OwnerWatchMap *WatchMap + AnnotationWatchMap *WatchMap + Blacklist map[schema.GroupVersionKind]bool +} + +// NewControllerMap returns a new object that contains a mapping between GVK +// and ControllerMapContents object +func NewControllerMap() *ControllerMap { + return &ControllerMap{ + internal: make(map[schema.GroupVersionKind]*Contents), + } +} + +// NewWatchMap - returns a new object that maps GVK to interface to determine +// if resource is being watched +func NewWatchMap() *WatchMap { + return &WatchMap{ + internal: make(map[schema.GroupVersionKind]interface{}), + } +} + +// Get - Returns a ControllerMapContents given a GVK as the key. 
`ok` +// determines if the key exists +func (cm *ControllerMap) Get(key schema.GroupVersionKind) (value *Contents, ok bool) { + cm.mutex.RLock() + defer cm.mutex.RUnlock() + value, ok = cm.internal[key] + return value, ok +} + +// Delete - Deletes associated GVK to controller mapping from the ControllerMap +func (cm *ControllerMap) Delete(key schema.GroupVersionKind) { + cm.mutex.Lock() + defer cm.mutex.Unlock() + delete(cm.internal, key) +} + +// Store - Adds a new GVK to controller mapping +func (cm *ControllerMap) Store(key schema.GroupVersionKind, value *Contents, blacklist []schema.GroupVersionKind) { + cm.mutex.Lock() + defer cm.mutex.Unlock() + cm.internal[key] = value + // watches.go Blacklist is []schema.GroupVersionKind, which we convert to a map (better performance) + // for the controller. + value.Blacklist = map[schema.GroupVersionKind]bool{} + for _, blacklistGVK := range blacklist { + cm.internal[key].Blacklist[blacklistGVK] = true + } +} + +// Get - Checks if GVK is already watched +func (wm *WatchMap) Get(key schema.GroupVersionKind) (value interface{}, ok bool) { + wm.mutex.RLock() + defer wm.mutex.RUnlock() + value, ok = wm.internal[key] + return value, ok +} + +// Delete - Deletes associated watches for a specific GVK +func (wm *WatchMap) Delete(key schema.GroupVersionKind) { + wm.mutex.Lock() + defer wm.mutex.Unlock() + delete(wm.internal, key) +} + +// Store - Adds a new GVK to be watched +func (wm *WatchMap) Store(key schema.GroupVersionKind) { + wm.mutex.Lock() + defer wm.mutex.Unlock() + wm.internal[key] = nil +} diff --git a/internal/ansible/proxy/inject_owner.go b/internal/ansible/proxy/inject_owner.go new file mode 100644 index 0000000..317256b --- /dev/null +++ b/internal/ansible/proxy/inject_owner.go @@ -0,0 +1,188 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httputil" + + "github.com/operator-framework/operator-lib/handler" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/set" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/controllermap" + k8sRequest "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/requestfactory" + "github.com/operator-framework/ansible-operator-plugins/internal/util/k8sutil" +) + +// injectOwnerReferenceHandler will handle proxied requests and inject the +// owner reference found in the authorization header. The Authorization is +// then deleted so that the proxy can re-set with the correct authorization. 
+type injectOwnerReferenceHandler struct { + next http.Handler + cMap *controllermap.ControllerMap + restMapper meta.RESTMapper + watchedNamespaces map[string]interface{} + apiResources *apiResources +} + +func (i *injectOwnerReferenceHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case http.MethodPost: + dump, _ := httputil.DumpRequest(req, false) + log.V(2).Info("Dumping request", "RequestDump", string(dump)) + rf := k8sRequest.RequestInfoFactory{APIPrefixes: set.New("api", "apis"), + GrouplessAPIPrefixes: set.New("api")} + r, err := rf.NewRequestInfo(req) + if err != nil { + m := "Could not convert request" + log.Error(err, m) + http.Error(w, m, http.StatusBadRequest) + return + } + if r.Subresource != "" { + // Don't inject owner ref if we are POSTing to a subresource + break + } + + if i.restMapper == nil { + i.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{schema.GroupVersion{ + Group: r.APIGroup, + Version: r.APIVersion, + }}) + } + + k, err := getGVKFromRequestInfo(r, i.restMapper) + if err != nil { + // break here in case resource doesn't exist in cache + log.Error(err, "Cache miss, can not find in rest mapper") + break + } + + // Determine if the resource is virtual. If it is then we should not attempt to use cache + isVR, err := i.apiResources.IsVirtualResource(k) + if err != nil { + // Fail if we can't determine whether it's a virtual resource or not. + // Otherwise we might create a resource without an ownerReference, which will prevent + // dependentWatches from being re-established and garbage collection from deleting the + // resource, unless a user manually adds the ownerReference. 
+ m := "Unable to determine if virtual resource" + log.Error(err, m, "gvk", k) + http.Error(w, m, http.StatusInternalServerError) + return + } + + if isVR { + log.V(2).Info("Virtual resource, must ask the cluster API", "gvk", k) + break + } + + log.Info("Injecting owner reference") + owner, err := getRequestOwnerRef(req) + if err != nil { + m := "Could not get owner reference" + log.Error(err, m) + http.Error(w, m, http.StatusInternalServerError) + return + } + if owner != nil { + body, err := io.ReadAll(req.Body) + if err != nil { + m := "Could not read request body" + log.Error(err, m) + http.Error(w, m, http.StatusInternalServerError) + return + } + data := &unstructured.Unstructured{} + err = json.Unmarshal(body, data) + if err != nil { + m := "Could not deserialize request body" + log.Error(err, m) + http.Error(w, m, http.StatusBadRequest) + return + } + ownerGV, err := schema.ParseGroupVersion(owner.APIVersion) + if err != nil { + m := fmt.Sprintf("could not get group version for: %v", owner) + log.Error(err, m) + http.Error(w, m, http.StatusBadRequest) + return + } + ownerGVK := schema.GroupVersionKind{ + Group: ownerGV.Group, + Version: ownerGV.Version, + Kind: owner.Kind, + } + ownerObject := &unstructured.Unstructured{} + ownerObject.SetGroupVersionKind(ownerGVK) + ownerObject.SetNamespace(owner.Namespace) + ownerObject.SetName(owner.Name) + addOwnerRef, err := k8sutil.SupportsOwnerReference(i.restMapper, ownerObject, data, r.Namespace) + if err != nil { + m := "Could not determine if we should add owner ref" + log.Error(err, m) + http.Error(w, m, http.StatusBadRequest) + return + } + if addOwnerRef { + data.SetOwnerReferences(append(data.GetOwnerReferences(), owner.OwnerReference)) + } else { + err := handler.SetOwnerAnnotations(ownerObject, data) + if err != nil { + m := "Could not set owner annotations" + log.Error(err, m) + http.Error(w, m, http.StatusBadRequest) + return + } + } + newBody, err := json.Marshal(data.Object) + if err != nil { + m := 
"Could not serialize body" + log.Error(err, m) + http.Error(w, m, http.StatusInternalServerError) + return + } + log.V(2).Info("Serialized body", "Body", string(newBody)) + req.Body = io.NopCloser(bytes.NewBuffer(newBody)) + req.ContentLength = int64(len(newBody)) + + // add watch for resource + // check if resource doesn't exist in watched namespaces + // if watchedNamespaces[""] exists then we are watching all namespaces + // and want to continue + // This is making sure we are not attempting to watch a resource outside of the + // namespaces that the cache can watch. + _, allNsPresent := i.watchedNamespaces[metav1.NamespaceAll] + _, reqNsPresent := i.watchedNamespaces[r.Namespace] + if allNsPresent || reqNsPresent { + err = addWatchToController(*owner, i.cMap, data, i.restMapper, addOwnerRef) + if err != nil { + m := "could not add watch to controller" + log.Error(err, m) + http.Error(w, m, http.StatusInternalServerError) + return + } + } + } + } + i.next.ServeHTTP(w, req) +} diff --git a/internal/ansible/proxy/inject_owner_test.go b/internal/ansible/proxy/inject_owner_test.go new file mode 100644 index 0000000..8961357 --- /dev/null +++ b/internal/ansible/proxy/inject_owner_test.go @@ -0,0 +1,125 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/kubeconfig" +) + +var _ = Describe("injectOwnerReferenceHandler", func() { + + Describe("ServeHTTP", func() { + It("Should inject ownerReferences even when namespace is not explicitly set", func() { + if testing.Short() { + Skip("skipping ansible owner reference injection testing in short mode") + } + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-owner-ref-injection", + }, + Data: map[string]string{ + "hello": "world", + }, + } + + body, err := json.Marshal(cm) + if err != nil { + Fail("Failed to marshal body") + } + + po, err := createTestPod("test-injection", "default", testClient) + if err != nil { + Fail(fmt.Sprintf("Failed to create pod: %v", err)) + } + defer func() { + if err := testClient.Delete(context.Background(), po); err != nil { + Fail(fmt.Sprintf("Failed to delete the pod: %v", err)) + } + }() + + req, err := http.NewRequest("POST", "http://localhost:8888/api/v1/namespaces/default/configmaps", bytes.NewReader(body)) + if err != nil { + Fail(fmt.Sprintf("Failed to create http request: %v", err)) + } + + username, err := kubeconfig.EncodeOwnerRef( + metav1.OwnerReference{ + APIVersion: "v1", + Kind: "Pod", + Name: po.GetName(), + UID: po.GetUID(), + }, "default") + if err != nil { + Fail("Failed to encode owner reference") + } + req.SetBasicAuth(username, "unused") + + httpClient := http.Client{} + + defer func() { + cleanupReq, err := http.NewRequest("DELETE", "http://localhost:8888/api/v1/namespaces/default/configmaps/test-owner-ref-injection", bytes.NewReader([]byte{})) + if err != nil { + Fail(fmt.Sprintf("Failed to delete configmap: %v", err)) + } + _, err = httpClient.Do(cleanupReq) + if err != nil { + Fail(fmt.Sprintf("Failed to delete configmap: %v", err)) + } + 
}() + + resp, err := httpClient.Do(req) + if err != nil { + Fail(fmt.Sprintf("Failed to create configmap: %v", err)) + } + respBody, err := io.ReadAll(resp.Body) + if err != nil { + Fail(fmt.Sprintf("Failed to read response body: %v", err)) + } + var modifiedCM corev1.ConfigMap + err = json.Unmarshal(respBody, &modifiedCM) + if err != nil { + Fail(fmt.Sprintf("Failed to unmarshal configmap: %v", err)) + } + ownerRefs := modifiedCM.ObjectMeta.OwnerReferences + + Expect(ownerRefs).To(HaveLen(1)) + + ownerRef := ownerRefs[0] + + Expect(ownerRef.APIVersion).To(Equal("v1")) + Expect(ownerRef.Kind).To(Equal("Pod")) + Expect(ownerRef.Name).To(Equal(po.GetName())) + Expect(ownerRef.UID).To(Equal(po.GetUID())) + }) + }) +}) diff --git a/internal/ansible/proxy/kubeconfig/kubeconfig.go b/internal/ansible/proxy/kubeconfig/kubeconfig.go new file mode 100644 index 0000000..3c5d050 --- /dev/null +++ b/internal/ansible/proxy/kubeconfig/kubeconfig.go @@ -0,0 +1,126 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubeconfig + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "html/template" + "net/url" + "os" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("kubeconfig") + +// kubectl, as of 1.10.5, only does basic auth if the username is present in +// the URL. 
The python client used by ansible, as of 6.0.0, only does basic +// auth if the username and password are provided under the "user" key within +// "users". +const kubeConfigTemplate = `--- +apiVersion: v1 +kind: Config +clusters: +- cluster: + insecure-skip-tls-verify: true + server: {{.ProxyURL}} + name: proxy-server +contexts: +- context: + cluster: proxy-server + user: admin/proxy-server + name: {{.Namespace}}/proxy-server +current-context: {{.Namespace}}/proxy-server +preferences: {} +users: +- name: admin/proxy-server + user: + username: {{.Username}} + password: unused +` + +// values holds the data used to render the template +type values struct { + Username string + ProxyURL string + Namespace string +} + +type NamespacedOwnerReference struct { + metav1.OwnerReference + Namespace string +} + +// EncodeOwnerRef takes an ownerReference and a namespace and returns a base64 encoded +// string that can be used in the username field of a request to associate the +// owner with the request being made. 
+func EncodeOwnerRef(ownerRef metav1.OwnerReference, namespace string) (string, error) { + nsOwnerRef := NamespacedOwnerReference{OwnerReference: ownerRef, Namespace: namespace} + ownerRefJSON, err := json.Marshal(nsOwnerRef) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(ownerRefJSON), nil +} + +// Create renders a kubeconfig template and writes it to disk +func Create(ownerRef metav1.OwnerReference, proxyURL string, namespace string) (*os.File, error) { + parsedURL, err := url.Parse(proxyURL) + if err != nil { + return nil, err + } + username, err := EncodeOwnerRef(ownerRef, namespace) + if err != nil { + return nil, err + } + parsedURL.User = url.User(username) + v := values{ + Username: username, + ProxyURL: parsedURL.String(), + Namespace: namespace, + } + + var parsed bytes.Buffer + + t := template.Must(template.New("kubeconfig").Parse(kubeConfigTemplate)) + if err := t.Execute(&parsed, v); err != nil { + return nil, err + } + + file, err := os.CreateTemp("", "kubeconfig") + if err != nil { + return nil, err + } + // multiple calls to close file will not hurt anything, + // but we don't want to lose the error because we are + // writing to the file, so we will call close twice. + defer func() { + if err := file.Close(); err != nil && !errors.Is(err, os.ErrClosed) { + log.Error(err, "Failed to close generated kubeconfig file") + } + }() + + if _, err := file.WriteString(parsed.String()); err != nil { + return nil, err + } + if err := file.Close(); err != nil { + return nil, err + } + return file, nil +} diff --git a/internal/ansible/proxy/kubectl.go b/internal/ansible/proxy/kubectl.go new file mode 100644 index 0000000..1651008 --- /dev/null +++ b/internal/ansible/proxy/kubectl.go @@ -0,0 +1,278 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This code was retrieved from +// https://github.com/kubernetes/kubernetes/blob/204d994/pkg/kubectl/proxy/proxy_server.go +// and modified for use in this project. + +package proxy + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "time" + + utilnet "k8s.io/apimachinery/pkg/util/net" + k8sproxy "k8s.io/apimachinery/pkg/util/proxy" + "k8s.io/client-go/rest" + "k8s.io/client-go/transport" + "k8s.io/kubectl/pkg/util" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("proxy") + +const ( + // DefaultHostAcceptRE is the default value for which hosts to accept. + DefaultHostAcceptRE = "^localhost$,^127\\.0\\.0\\.1$,^\\[::1\\]$" + // DefaultPathAcceptRE is the default path to accept. + DefaultPathAcceptRE = "^.*" + // DefaultPathRejectRE is the default set of paths to reject. + DefaultPathRejectRE = "^$" + // DefaultMethodRejectRE is the set of HTTP methods to reject by default. + DefaultMethodRejectRE = "^$" +) + +var ( + // ReverseProxyFlushInterval is the frequency to flush the reverse proxy. + // Only matters for long poll connections like the one used to watch. With an + // interval of 0 the reverse proxy will buffer content sent on any connection + // with transfer-encoding=chunked. + // TODO: Flush after each chunk so the client doesn't suffer a 100ms latency per + // watch event. 
+ ReverseProxyFlushInterval = 100 * time.Millisecond +) + +// FilterServer rejects requests which don't match one of the specified regular expressions +type FilterServer struct { + // Only paths that match this regexp will be accepted + AcceptPaths []*regexp.Regexp + // Paths that match this regexp will be rejected, even if they match the above + RejectPaths []*regexp.Regexp + // Hosts are required to match this list of regexp + AcceptHosts []*regexp.Regexp + // Methods that match this regexp are rejected + RejectMethods []*regexp.Regexp + // The delegate to call to handle accepted requests. + delegate http.Handler +} + +// MakeRegexpArray splits a comma separated list of regexps into an array of Regexp objects. +func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { + parts := strings.Split(str, ",") + result := make([]*regexp.Regexp, len(parts)) + for ix := range parts { + re, err := regexp.Compile(parts[ix]) + if err != nil { + return nil, err + } + result[ix] = re + } + return result, nil +} + +// MakeRegexpArrayOrDie creates an array of regular expression objects from a string or exits. +func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { + result, err := MakeRegexpArray(str) + if err != nil { + log.Error(err, "Error compiling re") + os.Exit(1) + } + return result +} + +func matchesRegexp(str string, regexps []*regexp.Regexp) bool { + for _, re := range regexps { + if re.MatchString(str) { + log.Info("Matched found", "MatchString", str, "Regexp", re) + return true + } + } + return false +} + +func (f *FilterServer) accept(method, path, host string) bool { + if matchesRegexp(path, f.RejectPaths) { + return false + } + if matchesRegexp(method, f.RejectMethods) { + return false + } + if matchesRegexp(path, f.AcceptPaths) && matchesRegexp(host, f.AcceptHosts) { + return true + } + return false +} + +// HandlerFor makes a shallow copy of f which passes its requests along to the +// new delegate. 
+func (f *FilterServer) HandlerFor(delegate http.Handler) *FilterServer { + f2 := *f + f2.delegate = delegate + return &f2 +} + +// Get host from a host header value like "localhost" or "localhost:8080" +func extractHost(header string) (host string) { + host, _, err := net.SplitHostPort(header) + if err != nil { + host = header + } + return host +} + +func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + host := extractHost(req.Host) + if f.accept(req.Method, req.URL.Path, host) { + log.Info("Filter acception", "Request.Method", req.Method, "Request.URL", req.URL.Path, "Host", host) + f.delegate.ServeHTTP(rw, req) + return + } + log.Info("Filter rejection", "Request.Method", req.Method, "Request.URL", req.URL.Path, "Host", host) + rw.WriteHeader(http.StatusForbidden) + if _, err := rw.Write([]byte("

Unauthorized

")); err != nil { + log.Error(err, "Failed to write response body") + } +} + +// Server is a http.Handler which proxies Kubernetes APIs to remote API server. +type server struct { + Handler http.Handler +} + +type responder struct{} + +func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { + log.Error(err, "Error while proxying request") + http.Error(w, err.Error(), http.StatusInternalServerError) +} + +// makeUpgradeTransport creates a transport that explicitly bypasses HTTP2 support +// for proxy connections that must upgrade. +func makeUpgradeTransport(config *rest.Config) (k8sproxy.UpgradeRequestRoundTripper, error) { + transportConfig, err := config.TransportConfig() + if err != nil { + return nil, err + } + tlsConfig, err := transport.TLSConfigFor(transportConfig) + if err != nil { + return nil, err + } + rt := utilnet.SetOldTransportDefaults(&http.Transport{ + TLSClientConfig: tlsConfig, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + }).DialContext, + }) + + upgrader, err := transport.HTTPWrappersForConfig(transportConfig, k8sproxy.MirrorRequest) + if err != nil { + return nil, err + } + return k8sproxy.NewUpgradeRequestRoundTripper(rt, upgrader), nil +} + +// NewServer creates and installs a new Server. 
+func newServer(apiProxyPrefix string, cfg *rest.Config) (*server, error) { + host := cfg.Host + if !strings.HasSuffix(host, "/") { + host = host + "/" + } + target, err := url.Parse(host) + if err != nil { + return nil, err + } + + responder := &responder{} + transport, err := rest.TransportFor(cfg) + if err != nil { + return nil, err + } + upgradeTransport, err := makeUpgradeTransport(cfg) + if err != nil { + return nil, err + } + proxy := k8sproxy.NewUpgradeAwareHandler(target, transport, false, false, responder) + proxy.UpgradeTransport = upgradeTransport + proxy.UseRequestLocation = true + + proxyServer := http.Handler(proxy) + + if !strings.HasPrefix(apiProxyPrefix, "/api") { + proxyServer = stripLeaveSlash(apiProxyPrefix, proxyServer) + } + + mux := http.NewServeMux() + mux.Handle(apiProxyPrefix, proxyServer) + return &server{Handler: mux}, nil +} + +// Listen is a simple wrapper around net.Listen. +func (s *server) Listen(address string, port int) (net.Listener, error) { + return net.Listen("tcp", fmt.Sprintf("%s:%d", address, port)) +} + +// ListenUnix does net.Listen for a unix socket +func (s *server) ListenUnix(path string) (net.Listener, error) { + // Remove any socket, stale or not, but fall through for other files + fi, err := os.Stat(path) + if err == nil && (fi.Mode()&os.ModeSocket) != 0 { + if err := os.Remove(path); err != nil { + return nil, err + } + } + // Default to only user accessible socket, caller can open up later if desired + oldmask, _ := util.Umask(0077) + l, err := net.Listen("unix", path) + if err != nil { + return l, err + } + if _, err = util.Umask(oldmask); err != nil { + return l, err + } + return l, err +} + +// ServeOnListener starts the server using given listener, loops forever. +func (s *server) ServeOnListener(l net.Listener) error { + server := http.Server{ + Handler: s.Handler, + ReadHeaderTimeout: 5 * time.Second, + } + return server.Serve(l) +} + +// like http.StripPrefix, but always leaves an initial slash. 
(so that our +// regexps will work.) +func stripLeaveSlash(prefix string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + p := strings.TrimPrefix(req.URL.Path, prefix) + if len(p) >= len(req.URL.Path) { + http.NotFound(w, req) + return + } + if len(p) > 0 && p[:1] != "/" { + p = "/" + p + } + req.URL.Path = p + h.ServeHTTP(w, req) + }) +} diff --git a/internal/ansible/proxy/proxy.go b/internal/ansible/proxy/proxy.go new file mode 100644 index 0000000..bcfa3e1 --- /dev/null +++ b/internal/ansible/proxy/proxy.go @@ -0,0 +1,393 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proxy + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + libhandler "github.com/operator-framework/operator-lib/handler" + "github.com/operator-framework/operator-lib/predicate" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + crHandler "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/handler" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/controllermap" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/kubeconfig" + k8sRequest "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/requestfactory" +) + +// This is the default timeout to wait for the cache to respond +// todo(shawn-hurley): Eventually this should be configurable +const cacheEstablishmentTimeout = 6 * time.Second +const AutoSkipCacheREList = "^/api/.*/pods/.*/exec,^/api/.*/pods/.*/attach" + +// RequestLogHandler - log the requests that come through the proxy. +func RequestLogHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // read body + body, err := io.ReadAll(req.Body) + if err != nil { + log.Error(err, "Could not read request body") + } + // fix body + req.Body = io.NopCloser(bytes.NewBuffer(body)) + log.Info("Request Info", "method", req.Method, "uri", req.RequestURI, "body", string(body)) + // Removing the authorization so that the proxy can set the correct authorization. 
+ req.Header.Del("Authorization") + h.ServeHTTP(w, req) + }) +} + +// HandlerChain will be used for users to pass defined handlers to the proxy. +// The hander chain will be run after InjectingOwnerReference if it is added +// and before the proxy handler. +type HandlerChain func(http.Handler) http.Handler + +// Options will be used by the user to specify the desired details +// for the proxy. +type Options struct { + Address string + Port int + Handler HandlerChain + KubeConfig *rest.Config + Cache cache.Cache + RESTMapper meta.RESTMapper + ControllerMap *controllermap.ControllerMap + WatchedNamespaces []string + DisableCache bool + OwnerInjection bool + LogRequests bool +} + +// Run will start a proxy server in a go routine that returns on the error +// channel if something is not correct on startup. Run will not return until +// the network socket is listening. +func Run(done chan error, o Options) error { + server, err := newServer("/", o.KubeConfig) + if err != nil { + return err + } + if o.Handler != nil { + server.Handler = o.Handler(server.Handler) + } + if o.ControllerMap == nil { + return fmt.Errorf("failed to get controller map from options") + } + if o.WatchedNamespaces == nil { + return fmt.Errorf("failed to get list of watched namespaces from options") + } + + watchedNamespaceMap := make(map[string]interface{}) + // Convert string list to map + for _, ns := range o.WatchedNamespaces { + watchedNamespaceMap[ns] = nil + } + + // Create apiResources and + discoveryClient, err := discovery.NewDiscoveryClientForConfig(o.KubeConfig) + if err != nil { + return err + } + resources := &apiResources{ + mu: &sync.RWMutex{}, + gvkToAPIResource: map[string]metav1.APIResource{}, + discoveryClient: discoveryClient, + } + + if o.Cache == nil && !o.DisableCache { + // Need to initialize cache since we don't have one + log.Info("Initializing and starting informer cache...") + informerCache, err := cache.New(o.KubeConfig, cache.Options{}) + if err != nil { + return err 
+ } + ctx, cancel := context.WithCancel(context.TODO()) + go func() { + if err := informerCache.Start(ctx); err != nil { + log.Error(err, "Failed to start informer cache") + } + defer cancel() + }() + log.Info("Waiting for cache to sync...") + synced := informerCache.WaitForCacheSync(context.TODO()) + if !synced { + return fmt.Errorf("failed to sync cache") + } + log.Info("Cache sync was successful") + o.Cache = informerCache + } + + // Remove the authorization header so the proxy can correctly inject the header. + server.Handler = removeAuthorizationHeader(server.Handler) + + if o.OwnerInjection { + server.Handler = &injectOwnerReferenceHandler{ + next: server.Handler, + cMap: o.ControllerMap, + restMapper: o.RESTMapper, + watchedNamespaces: watchedNamespaceMap, + apiResources: resources, + } + } else { + log.Info("Warning: injection of owner references and dependent watches is turned off") + } + if o.LogRequests { + server.Handler = RequestLogHandler(server.Handler) + } + if !o.DisableCache { + autoSkipCacheRegexp, err := MakeRegexpArray(AutoSkipCacheREList) + if err != nil { + log.Error(err, "Failed to parse cache skip regular expression") + } + server.Handler = &cacheResponseHandler{ + next: server.Handler, + informerCache: o.Cache, + restMapper: o.RESTMapper, + watchedNamespaces: watchedNamespaceMap, + cMap: o.ControllerMap, + injectOwnerRef: o.OwnerInjection, + apiResources: resources, + skipPathRegexp: autoSkipCacheRegexp, + } + } + + l, err := server.Listen(o.Address, o.Port) + if err != nil { + return err + } + go func() { + log.Info("Starting to serve", "Address", l.Addr().String()) + done <- server.ServeOnListener(l) + }() + return nil +} + +// Helper function used by cache response and owner injection +func addWatchToController(owner kubeconfig.NamespacedOwnerReference, cMap *controllermap.ControllerMap, + resource *unstructured.Unstructured, restMapper meta.RESTMapper, useOwnerRef bool) error { + dataMapping, err := 
restMapper.RESTMapping(resource.GroupVersionKind().GroupKind(), + resource.GroupVersionKind().Version) + if err != nil { + m := fmt.Sprintf("Could not get rest mapping for: %v", resource.GroupVersionKind()) + log.Error(err, m) + return err + + } + ownerGV, err := schema.ParseGroupVersion(owner.APIVersion) + if err != nil { + m := fmt.Sprintf("could not get group version for: %v", owner) + log.Error(err, m) + return err + } + ownerMapping, err := restMapper.RESTMapping(schema.GroupKind{Kind: owner.Kind, Group: ownerGV.Group}, + ownerGV.Version) + if err != nil { + m := fmt.Sprintf("could not get rest mapping for: %v", owner) + log.Error(err, m) + return err + } + + dataNamespaceScoped := dataMapping.Scope.Name() != meta.RESTScopeNameRoot + contents, ok := cMap.Get(ownerMapping.GroupVersionKind) + if !ok { + return errors.New("failed to find controller in map") + } + owMap := contents.OwnerWatchMap + awMap := contents.AnnotationWatchMap + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(ownerMapping.GroupVersionKind) + + // Add a watch to controller + if contents.WatchDependentResources && !contents.Blacklist[resource.GroupVersionKind()] { + // Store watch in map + // Use EnqueueRequestForOwner unless user has configured watching cluster scoped resources and we have to + switch { + case useOwnerRef: + _, exists := owMap.Get(resource.GroupVersionKind()) + // If already watching resource no need to add a new watch + if exists { + return nil + } + + owMap.Store(resource.GroupVersionKind()) + log.Info("Watching child resource", "kind", resource.GroupVersionKind(), + "enqueue_kind", u.GroupVersionKind()) + err := contents.Controller.Watch(&source.Kind{Type: resource}, + &handler.LoggingEnqueueRequestForOwner{ + EnqueueRequestForOwner: crHandler.EnqueueRequestForOwner{OwnerType: u}, + }, predicate.DependentPredicate{}) + // Store watch in map + if err != nil { + log.Error(err, "Failed to watch child resource", + "kind", resource.GroupVersionKind(), "enqueue_kind", 
u.GroupVersionKind()) + return err + } + case (!useOwnerRef && dataNamespaceScoped) || contents.WatchClusterScopedResources: + _, exists := awMap.Get(resource.GroupVersionKind()) + // If already watching resource no need to add a new watch + if exists { + return nil + } + awMap.Store(resource.GroupVersionKind()) + ownerGK := schema.GroupKind{ + Kind: owner.Kind, + Group: ownerGV.Group, + } + log.Info("Watching child resource", "kind", resource.GroupVersionKind(), + "enqueue_annotation_type", ownerGK.String()) + err = contents.Controller.Watch(&source.Kind{Type: resource}, + &handler.LoggingEnqueueRequestForAnnotation{ + EnqueueRequestForAnnotation: libhandler.EnqueueRequestForAnnotation{Type: ownerGK}, + }, predicate.DependentPredicate{}) + if err != nil { + log.Error(err, "Failed to watch child resource", + "kind", resource.GroupVersionKind(), "enqueue_kind", u.GroupVersionKind()) + return err + } + } + } else { + log.Info("Resource will not be watched/cached.", "GVK", resource.GroupVersionKind()) + } + return nil +} + +func removeAuthorizationHeader(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req.Header.Del("Authorization") + h.ServeHTTP(w, req) + }) +} + +// Helper function used by recovering dependent watches and owner ref injection. +func getRequestOwnerRef(req *http.Request) (*kubeconfig.NamespacedOwnerReference, error) { + owner := kubeconfig.NamespacedOwnerReference{} + user, _, ok := req.BasicAuth() + if !ok { + return nil, nil + } + authString, err := base64.StdEncoding.DecodeString(user) + if err != nil { + m := "Could not base64 decode username" + log.Error(err, m) + return &owner, err + } + // Set owner to NamespacedOwnerReference, which has metav1.OwnerReference + // as a subset along with the Namespace of the owner. Please see the + // kubeconfig.NamespacedOwnerReference type for more information. The + // namespace is required when creating the reconcile requests. 
+ if err := json.Unmarshal(authString, &owner); err != nil { + m := "Could not unmarshal auth string" + log.Error(err, m) + return &owner, err + } + return &owner, err +} + +func getGVKFromRequestInfo(r *k8sRequest.RequestInfo, restMapper meta.RESTMapper) (schema.GroupVersionKind, error) { + gvr := schema.GroupVersionResource{ + Group: r.APIGroup, + Version: r.APIVersion, + Resource: r.Resource, + } + return restMapper.KindFor(gvr) +} + +type apiResources struct { + mu *sync.RWMutex + gvkToAPIResource map[string]metav1.APIResource + discoveryClient discovery.DiscoveryInterface +} + +func (a *apiResources) resetResources() error { + a.mu.Lock() + defer a.mu.Unlock() + + _, apisResourceList, err := a.discoveryClient.ServerGroupsAndResources() + if err != nil { + return err + } + + a.gvkToAPIResource = map[string]metav1.APIResource{} + + for _, apiResource := range apisResourceList { + gv, err := schema.ParseGroupVersion(apiResource.GroupVersion) + if err != nil { + return err + } + for _, resource := range apiResource.APIResources { + // Names containing a "/" are subresources and should be ignored + if strings.Contains(resource.Name, "/") { + continue + } + gvk := schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: resource.Kind, + } + + a.gvkToAPIResource[gvk.String()] = resource + } + } + + return nil +} + +func (a *apiResources) IsVirtualResource(gvk schema.GroupVersionKind) (bool, error) { + a.mu.RLock() + apiResource, ok := a.gvkToAPIResource[gvk.String()] + a.mu.RUnlock() + + if !ok { + //reset the resources + err := a.resetResources() + if err != nil { + return false, err + } + // retry to get the resource + a.mu.RLock() + apiResource, ok = a.gvkToAPIResource[gvk.String()] + a.mu.RUnlock() + if !ok { + return false, fmt.Errorf("unable to get api resource for gvk: %v", gvk) + } + } + + allVerbs := discovery.SupportsAllVerbs{ + Verbs: []string{"watch", "get", "list"}, + } + + if !allVerbs.Match(gvk.GroupVersion().String(), &apiResource) { 
+ return true, nil + } + + return false, nil +} diff --git a/internal/ansible/proxy/proxy_suite_test.go b/internal/ansible/proxy/proxy_suite_test.go new file mode 100644 index 0000000..604657d --- /dev/null +++ b/internal/ansible/proxy/proxy_suite_test.go @@ -0,0 +1,128 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "context" + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/controllermap" + kcorev1 "k8s.io/api/core/v1" + kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var testMgr manager.Manager + +var testClient client.Client + +func TestProxy(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Proxy Test Suite") +} + +var _ = BeforeSuite(func() { + if testing.Short() { + return + } + var err error + testMgr, err = manager.New(config.GetConfigOrDie(), manager.Options{Namespace: "default"}) + if err != nil { + Fail(fmt.Sprintf("Failed to instantiate manager: %v", err)) + } + done := make(chan error) + cMap := controllermap.NewControllerMap() + err = Run(done, Options{ + Address: "localhost", + Port: 8888, + KubeConfig: testMgr.GetConfig(), + Cache: nil, + RESTMapper: testMgr.GetRESTMapper(), + ControllerMap: 
cMap, + WatchedNamespaces: []string{"test-watched-namespace"}, + OwnerInjection: true, + }) + if err != nil { + Fail(fmt.Sprintf("Error starting proxy: %v", err)) + } + testClient, err = client.New(testMgr.GetConfig(), client.Options{}) + if err != nil { + Fail(fmt.Sprintf("Failed to create the client: %v", err)) + } + _, err = createTestNamespace("test-watched-namespace", testClient) + if err != nil { + Fail(fmt.Sprintf("Failed to create watched namespace: %v", err)) + } +}) + +var _ = AfterSuite(func() { + if testing.Short() { + return + } + err := testClient.Delete(context.Background(), &kcorev1.Namespace{ + ObjectMeta: kmetav1.ObjectMeta{ + Name: "test-watched-namespace", + Labels: map[string]string{ + "test-label": "test-watched-namespace", + }, + }, + }) + + if err != nil { + Fail(fmt.Sprintf("Failed to clean up namespace: %v:", err)) + } +}) + +func createTestNamespace(name string, cl client.Client) (client.Object, error) { + ns := &kcorev1.Namespace{ + ObjectMeta: kmetav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "test-label": name, + }, + }, + } + if err := cl.Create(context.Background(), ns); err != nil { + return nil, err + } + return ns, nil +} + +func createTestPod(name, namespace string, cl client.Client) (client.Object, error) { + three := int64(3) + pod := &kcorev1.Pod{ + ObjectMeta: kmetav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "test-label": name, + }, + }, + Spec: kcorev1.PodSpec{ + Containers: []kcorev1.Container{{Name: "nginx", Image: "nginx"}}, + RestartPolicy: "Always", + ActiveDeadlineSeconds: &three, + }, + } + if err := cl.Create(context.Background(), pod); err != nil { + return nil, err + } + return pod, nil +} diff --git a/internal/ansible/proxy/proxy_test.go b/internal/ansible/proxy/proxy_test.go new file mode 100644 index 0000000..e28d1d0 --- /dev/null +++ b/internal/ansible/proxy/proxy_test.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under 
the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + + kcorev1 "k8s.io/api/core/v1" +) + +var _ = Describe("proxyTests", func() { + t := GinkgoT() + + It("should retrieve resources from the cache", func() { + if testing.Short() { + Skip("skipping ansible proxy testing in short mode") + } + po, err := createTestPod("test", "test-watched-namespace", testClient) + if err != nil { + t.Fatalf("Failed to create the pod: %v", err) + } + defer func() { + if err := testClient.Delete(context.Background(), po); err != nil { + t.Fatalf("Failed to delete the pod: %v", err) + } + }() + + resp, err := http.Get("http://localhost:8888/api/v1/namespaces/test-watched-namespace/pods/test") + if err != nil { + t.Fatalf("Error getting pod from proxy: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil && !errors.Is(err, os.ErrClosed) { + t.Errorf("Failed to close response body: (%v)", err) + } + }() + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("Error reading response body: %v", err) + } + // Should only be one string from 'X-Cache' header (explicitly set to HIT in proxy) + if resp.Header["X-Cache"] == nil { + t.Fatalf("Object was not retrieved from cache") + } + if resp.Header["X-Cache"][0] != "HIT" { + t.Fatalf("Cache response header found but got [%v], expected [HIT]", resp.Header["X-Cache"][0]) + } + data := 
kcorev1.Pod{} + err = json.Unmarshal(body, &data) + if err != nil { + t.Fatalf("Error parsing response: %v", err) + } + if data.Name != "test" { + t.Fatalf("Got unexpected pod name: %#v", data.Name) + } + }) +}) diff --git a/internal/ansible/proxy/requestfactory/requestinfo.go b/internal/ansible/proxy/requestfactory/requestinfo.go new file mode 100644 index 0000000..5d3d73b --- /dev/null +++ b/internal/ansible/proxy/requestfactory/requestinfo.go @@ -0,0 +1,278 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This code was retrieved from +// https://github.com/kubernetes/apiserver/blob/master/pkg/endpoints/request/requestinfo.go +// and slightly modified for use in this project + +package requestfactory + +import ( + "fmt" + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/set" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("requestfactory") + +// RequestInfo holds information parsed from the http.Request +type RequestInfo struct { + // IsResourceRequest indicates whether or not the request is for an API + // resource or subresource + IsResourceRequest bool + // Path is the URL path of the request + Path string + // Verb is the kube verb associated with the request for API requests, not + // the http verb. This includes things like list and watch. for + // non-resource requests, this is the lowercase http verb + Verb string + + APIPrefix string + APIGroup string + APIVersion string + Namespace string + // Resource is the name of the resource being requested. This is not the + // kind. For example: pods + Resource string + // Subresource is the name of the subresource being requested. This is a + // different resource, scoped to the parent resource, but it may have a + // different kind. For instance, /pods has the resource "pods" and the kind + // "Pod", while /pods/foo/status has the resource "pods", the sub resource + // "status", and the kind "Pod" (because status operates on pods). The + // binding resource for a pod though may be /pods/foo/binding, which has + // resource "pods", subresource "binding", and kind "Binding". + Subresource string + // Name is empty for some verbs, but if the request directly indicates a name + // (not in body content) then this field is filled in. 
+ Name string + // Parts are the path parts for the request, always starting with + // /{resource}/{name} + Parts []string +} + +// specialVerbs contains just strings which are used in REST paths for special +// actions that don't fall under the normal CRUDdy GET/POST/PUT/DELETE actions +// on REST objects. TODO: find a way to keep this up to date automatically. +// Maybe dynamically populate list as handlers added to master's Mux. +var specialVerbs = set.New("proxy", "watch") + +// specialVerbsNoSubresources contains root verbs which do not allow +// subresources +var specialVerbsNoSubresources = set.New("proxy") + +// namespaceSubresources contains subresources of namespace this list allows +// the parser to distinguish between a namespace subresource, and a namespaced +// resource +var namespaceSubresources = set.New("status", "finalize") + +// NamespaceSubResourcesForTest exports namespaceSubresources for testing in +// pkg/master/master_test.go, so we never drift +var NamespaceSubResourcesForTest = set.New(namespaceSubresources.SortedList()...) + +type RequestInfoFactory struct { + APIPrefixes set.Set[string] // without leading and trailing slashes + GrouplessAPIPrefixes set.Set[string] // without leading and trailing slashes +} + +// TODO write an integration test against the swagger doc to test the +// RequestInfo and match up behavior to responses NewRequestInfo returns the +// information from the http request. If error is not nil, RequestInfo holds +// the information as best it is known before the failure It handles both +// resource and non-resource requests and fills in all the pertinent +// information for each. 
// Valid Inputs:
// Resource paths
// /apis/{api-group}/{version}/namespaces
// /api/{version}/namespaces
// /api/{version}/namespaces/{namespace}
// /api/{version}/namespaces/{namespace}/{resource}
// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/{resource}
// /api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
// /api/{version}/proxy/{resource}/{resourceName}
// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
//
// Special verbs with subresources:
// /api/{version}/watch/{resource}
// /api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
// /apis/{api-group}/{version}
// /apis/{api-group}
// /apis
// /api/{version}
// /api
// /healthz

func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, error) { //nolint:gocyclo
	// TODO: Try to reduce the complexity of this last measured at 33 (failing at > 30) and remove the // nolint:gocyclo
	// start with a non-resource request until proven otherwise
	requestInfo := RequestInfo{
		IsResourceRequest: false,
		Path:              req.URL.Path,
		Verb:              strings.ToLower(req.Method),
	}

	currentParts := splitPath(req.URL.Path)
	if len(currentParts) < 3 {
		// return a non-resource request
		return &requestInfo, nil
	}

	if !r.APIPrefixes.Has(currentParts[0]) {
		// return a non-resource request
		return &requestInfo, nil
	}
	requestInfo.APIPrefix = currentParts[0]
	currentParts = currentParts[1:]

	if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
		// one part (APIPrefix) has already been consumed, so this is actually "do
		// we have four parts?"
		if len(currentParts) < 3 {
			// return a non-resource request
			return &requestInfo, nil
		}

		requestInfo.APIGroup = currentParts[0]
		currentParts = currentParts[1:]
	}

	requestInfo.IsResourceRequest = true
	requestInfo.APIVersion = currentParts[0]
	currentParts = currentParts[1:]

	// handle input of form /{specialVerb}/*
	// Note: the length checks above guarantee currentParts still has at
	// least one element here, so the index below cannot panic.
	if specialVerbs.Has(currentParts[0]) {
		if len(currentParts) < 2 {
			return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
		}

		requestInfo.Verb = currentParts[0]
		currentParts = currentParts[1:]

	} else {
		// Map the HTTP method to the corresponding kube verb; list/watch
		// and deletecollection are refined further below.
		switch req.Method {
		case "POST":
			requestInfo.Verb = "create"
		case "GET", "HEAD":
			requestInfo.Verb = "get"
		case "PUT":
			requestInfo.Verb = "update"
		case "PATCH":
			requestInfo.Verb = "patch"
		case "DELETE":
			requestInfo.Verb = "delete"
		default:
			requestInfo.Verb = ""
		}
	}

	// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to
	// be relative to kind
	if currentParts[0] == "namespaces" {
		if len(currentParts) > 1 {
			requestInfo.Namespace = currentParts[1]

			// if there is another step after the namespace name and it is not a
			// known namespace subresource move currentParts to include it as a
			// resource in its own right
			if len(currentParts) > 2 && !namespaceSubresources.Has(currentParts[2]) {
				currentParts = currentParts[2:]
			}
		}
	} else {
		requestInfo.Namespace = metav1.NamespaceNone
	}

	// parsing successful, so we now know the proper value for .Parts
	requestInfo.Parts = currentParts

	// parts look like:
	// resource/resourceName/subresource/other/stuff/we/don't/interpret
	// The fallthroughs fill in Resource/Name/Subresource for however many
	// parts are actually present.
	switch {
	case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
		requestInfo.Subresource = requestInfo.Parts[2]
		fallthrough
	case len(requestInfo.Parts) >= 2:
		requestInfo.Name = requestInfo.Parts[1]
		fallthrough
	case len(requestInfo.Parts) >= 1:
		requestInfo.Resource = requestInfo.Parts[0]
	}

	// if there's no name on the request and we thought it was a get before, then
	// the actual verb is a list or a watch
	if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
		opts := metainternalversion.ListOptions{}
		if err := metainternalscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion,
			&opts); err != nil {
			// An error in parsing request will result in default to "list" and not
			// setting "name" field.
			log.Error(err, "Could not parse request")
			// Reset opts to not rely on partial results from parsing.
			// However, if watch is set, let's report it.
			opts = metainternalversion.ListOptions{}
			if values := req.URL.Query()["watch"]; len(values) > 0 {
				switch strings.ToLower(values[0]) {
				case "false", "0":
				default:
					opts.Watch = true
				}
			}
		}

		if opts.Watch {
			requestInfo.Verb = "watch"
		} else {
			requestInfo.Verb = "list"
		}

		// A list filtered to an exact metadata.name is treated as a named
		// request, provided the name is a valid path segment.
		if opts.FieldSelector != nil {
			if name, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok {
				if len(path.IsValidPathSegmentName(name)) == 0 {
					requestInfo.Name = name
				}
			}
		}
	}
	// if there's no name on the request and we thought it was a delete before,
	// then the actual verb is deletecollection
	if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
		requestInfo.Verb = "deletecollection"
	}

	return &requestInfo, nil
}

// splitPath returns the segments for a URL path.
// splitPath returns the slash-separated segments of a URL path with any
// leading and trailing slashes removed. An empty or all-slash path yields
// an empty (non-nil) slice; interior empty segments ("a//b") are kept.
func splitPath(path string) []string {
	trimmed := strings.Trim(path, "/")
	if len(trimmed) == 0 {
		return []string{}
	}
	return strings.Split(trimmed, "/")
}
+ stopped bool + + // mutex controls access to the "stopped" bool above, ensuring that writes + // are goroutine-safe. + mutex sync.RWMutex + + // ident is the unique identifier for a particular run of ansible-runner + ident string + + // logger holds a logger that has some fields already set + logger logr.Logger +} + +func New(ident string, errChan chan<- error) (*EventReceiver, error) { + sockPath := fmt.Sprintf("/tmp/ansibleoperator-%s", ident) + listener, err := net.Listen("unix", sockPath) + if err != nil { + return nil, err + } + + rec := EventReceiver{ + Events: make(chan JobEvent, 1000), + SocketPath: sockPath, + URLPath: "/events/", + ident: ident, + logger: logf.Log.WithName("eventapi").WithValues("job", ident), + } + + mux := http.NewServeMux() + mux.HandleFunc(rec.URLPath, rec.handleEvents) + srv := http.Server{Handler: mux, ReadHeaderTimeout: 5 * time.Second} + rec.server = &srv + + go func() { + errChan <- srv.Serve(listener) + }() + return &rec, nil +} + +// Close ensures that appropriate resources are cleaned up, such as any unix +// streaming socket that may be in use. Close must be called. 
+func (e *EventReceiver) Close() { + e.mutex.Lock() + e.stopped = true + e.mutex.Unlock() + e.logger.V(1).Info("Event API stopped") + if err := e.server.Close(); err != nil && !errors.Is(err, os.ErrClosed) { + e.logger.Error(err, "Failed to close event receiver") + } + os.Remove(e.SocketPath) + close(e.Events) +} + +func (e *EventReceiver) handleEvents(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != e.URLPath { + http.NotFound(w, r) + e.logger.Info("Path not found", "code", "404", "Request.Path", r.URL.Path) + return + } + + if r.Method != http.MethodPost { + e.logger.Info("Method not allowed", "code", "405", "Request.Method", r.Method) + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + ct := r.Header.Get("content-type") + if strings.Split(ct, ";")[0] != "application/json" { + e.logger.Info("Wrong content type", "code", "415", "Request.Content-Type", ct) + w.WriteHeader(http.StatusUnsupportedMediaType) + if _, err := w.Write([]byte("The content-type must be \"application/json\"")); err != nil { + e.logger.Error(err, "Failed to write response body") + } + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + e.logger.Error(err, "Could not read request body", "code", "500") + w.WriteHeader(http.StatusInternalServerError) + return + } + + event := JobEvent{} + err = json.Unmarshal(body, &event) + if err != nil { + e.logger.Info("Could not deserialize body.", "code", "400", "Error", err) + w.WriteHeader(http.StatusBadRequest) + if _, err := w.Write([]byte("Could not deserialize body as JSON")); err != nil { + e.logger.Error(err, "Failed to write response body") + } + return + } + + // Guarantee that the Events channel will not be written to if stopped == + // true, because in that case the channel has been closed. 
+	e.mutex.RLock()
+	defer e.mutex.RUnlock()
+	if e.stopped {
+		// BUGFIX: the deferred RUnlock above releases the read lock; a second explicit RUnlock here would panic.
+		w.WriteHeader(http.StatusGone)
+		e.logger.Info("Stopped and not accepting additional events for this job", "code", "410")
+		return
+	}
+	// ansible-runner sends "status events" and "ansible events". The "status
+	// events" signify a change in the state of ansible-runner itself, which
+	// we're not currently interested in.
+	// https://ansible-runner.readthedocs.io/en/latest/external_interface.html#event-structure
+	if event.UUID == "" {
+		e.logger.V(1).Info("Dropping event that is not a JobEvent")
+		e.logger.V(2).Info("Dropped event", "event", event, "request", string(body))
+	} else {
+		// timeout if the channel blocks for too long
+		timeout := time.NewTimer(10 * time.Second)
+		select {
+		case e.Events <- event:
+		case <-timeout.C:
+			e.logger.Info("Timed out writing event to channel", "code", "500")
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		_ = timeout.Stop()
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
diff --git a/internal/ansible/runner/eventapi/types.go b/internal/ansible/runner/eventapi/types.go
new file mode 100644
index 0000000..669a67b
--- /dev/null
+++ b/internal/ansible/runner/eventapi/types.go
@@ -0,0 +1,138 @@
+// Copyright 2018 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eventapi
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// Ansible Events
+
+	// EventPlaybookOnTaskStart - playbook is starting to run a task.
+	EventPlaybookOnTaskStart = "playbook_on_task_start"
+	// EventRunnerOnOk - task finished with ok status.
+	EventRunnerOnOk = "runner_on_ok"
+	// EventRunnerOnFailed - task finished with failed status.
+	EventRunnerOnFailed = "runner_on_failed"
+	// EventPlaybookOnStats - playbook has finished running.
+	EventPlaybookOnStats = "playbook_on_stats"
+	// EventRunnerItemOnOk - item finished with ok status.
+	EventRunnerItemOnOk = "runner_item_on_ok"
+
+	// Ansible Task Actions
+
+	// TaskActionSetFact - task action of setting a fact.
+	TaskActionSetFact = "set_fact"
+	// TaskActionDebug - task action of printing a debug message.
+	TaskActionDebug = "debug"
+
+	// defaultFailedMessage - Default failed playbook message
+	defaultFailedMessage = "unknown playbook failure"
+)
+
+// EventTime - time to unmarshal nano time.
+type EventTime struct {
+	time.Time
+}
+
+// UnmarshalJSON - override unmarshal json.
+func (e *EventTime) UnmarshalJSON(b []byte) (err error) {
+	e.Time, err = time.Parse("2006-01-02T15:04:05.999999999", strings.Trim(string(b[:]), "\"\\"))
+	return
+}
+
+// MarshalJSON - override the marshal json.
+func (e EventTime) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("\"%s\"", e.Time.Format("2006-01-02T15:04:05.999999999"))), nil // nine fractional digits so output round-trips through UnmarshalJSON
+}
+
+// JobEvent - event of an ansible run.
+type JobEvent struct {
+	UUID      string                 `json:"uuid"`
+	Counter   int                    `json:"counter"`
+	StdOut    string                 `json:"stdout"`
+	StartLine int                    `json:"start_line"`
+	EndLine   int                    `json:"end_line"` // ansible-runner emits snake_case "end_line"
+	Event     string                 `json:"event"`
+	EventData map[string]interface{} `json:"event_data"`
+	PID       int                    `json:"pid"`
+	Created   EventTime              `json:"created"`
+}
+
+// StatusJobEvent - event of an ansible run.
+type StatusJobEvent struct {
+	UUID      string         `json:"uuid"`
+	Counter   int            `json:"counter"`
+	StdOut    string         `json:"stdout"`
+	StartLine int            `json:"start_line"`
+	EndLine   int            `json:"end_line"` // ansible-runner emits snake_case "end_line"
+	Event     string         `json:"event"`
+	EventData StatsEventData `json:"event_data"`
+	PID       int            `json:"pid"`
+	Created   EventTime      `json:"created"`
+}
+
+// StatsEventData - data for the status event.
+type StatsEventData struct {
+	Playbook     string         `json:"playbook"`
+	PlaybookUUID string         `json:"playbook_uuid"`
+	Changed      map[string]int `json:"changed"`
+	Ok           map[string]int `json:"ok"`
+	Failures     map[string]int `json:"failures"`
+	Skipped      map[string]int `json:"skipped"`
+}
+
+// FailureMessages - failure messages from the event api
+type FailureMessages []string
+
+// GetFailedPlaybookMessage - get the failure message from res.msg
+func (je JobEvent) GetFailedPlaybookMessage() string {
+	message := defaultFailedMessage
+	result, ok := je.EventData["res"].(map[string]interface{})
+	if !ok {
+		return message
+	}
+	if m, ok := result["msg"].(string); ok {
+		message = m
+	}
+	return message
+}
+
+// IgnoreError - Does the job event contain the ignore_error ansible flag
+func (je JobEvent) IgnoreError() bool {
+	ignoreErrors, ok := je.EventData["ignore_errors"]
+	if !ok {
+		return false
+	}
+	if b, ok := ignoreErrors.(bool); ok && b {
+		return b
+	}
+	return false
+}
+
+// Rescued - Detects whether or not a task was rescued
+func (je JobEvent) Rescued() bool {
+	rescued := je.EventData["rescued"] // nil when the key is absent
+	rescuedMap, _ := rescued.(map[string]interface{}) // comma-ok assertion: nil (loop is a no-op) instead of panicking on unexpected event data
+	for _, v := range rescuedMap {
+		if count, isFloat := v.(float64); isFloat && int(count) == 1 {
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/ansible/runner/fake/runner.go b/internal/ansible/runner/fake/runner.go
new file mode 100644
index 0000000..5cc2152
--- /dev/null
+++ b/internal/ansible/runner/fake/runner.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fake + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" +) + +// Runner - implements the Runner interface for a GVK that's being watched. +type Runner struct { + Finalizer string + ReconcilePeriod time.Duration + ManageStatus bool + WatchDependentResources bool + WatchClusterScopedResources bool + // Used to send error if Run should fail. + Error error + // Job Events that will be sent back from the runs channel + JobEvents []eventapi.JobEvent + //Stdout standard out to reply if failure occurs. + Stdout string +} + +type runResult struct { + events <-chan eventapi.JobEvent + stdout string +} + +func (r *runResult) Events() <-chan eventapi.JobEvent { + return r.events +} + +func (r *runResult) Stdout() (string, error) { + if r.stdout != "" { + return r.stdout, nil + } + return r.stdout, fmt.Errorf("unable to find standard out") +} + +// Run - runs the fake runner. +func (r *Runner) Run(_ string, u *unstructured.Unstructured, _ string) (runner.RunResult, error) { + if r.Error != nil { + return nil, r.Error + } + c := make(chan eventapi.JobEvent) + go func() { + for _, je := range r.JobEvents { + c <- je + } + close(c) + }() + return &runResult{events: c, stdout: r.Stdout}, nil +} + +// GetReconcilePeriod - new reconcile period. 
+func (r *Runner) GetReconcilePeriod() (time.Duration, bool) { + return r.ReconcilePeriod, r.ReconcilePeriod != time.Duration(0) +} + +// GetManageStatus - get managestatus. +func (r *Runner) GetManageStatus() bool { + return r.ManageStatus +} + +// GetWatchDependentResources - get watchDependentResources. +func (r *Runner) GetWatchDependentResources() bool { + return r.WatchDependentResources +} + +// GetWatchClusterScopedResources - get watchClusterScopedResources. +func (r *Runner) GetWatchClusterScopedResources() bool { + return r.WatchClusterScopedResources +} + +// GetFinalizer - gets the fake finalizer. +func (r *Runner) GetFinalizer() (string, bool) { + return r.Finalizer, r.Finalizer != "" +} diff --git a/internal/ansible/runner/internal/inputdir/inputdir.go b/internal/ansible/runner/internal/inputdir/inputdir.go new file mode 100644 index 0000000..48490a4 --- /dev/null +++ b/internal/ansible/runner/internal/inputdir/inputdir.go @@ -0,0 +1,208 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package inputdir + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/spf13/afero" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("inputdir") + +// InputDir represents an input directory for ansible-runner. 
+type InputDir struct { + Path string + PlaybookPath string + Parameters map[string]interface{} + EnvVars map[string]string + Settings map[string]string + CmdLine string +} + +// makeDirs creates the required directory structure. +func (i *InputDir) makeDirs() error { + for _, path := range []string{"env", "project", "inventory"} { + fullPath := filepath.Join(i.Path, path) + err := os.MkdirAll(fullPath, os.ModePerm) + if err != nil { + log.Error(err, "Unable to create directory", "Path", fullPath) + return err + } + } + return nil +} + +// addFile adds a file to the given relative path within the input directory. +func (i *InputDir) addFile(path string, content []byte) error { + fullPath := filepath.Join(i.Path, path) + err := os.WriteFile(fullPath, content, 0644) + if err != nil { + log.Error(err, "Unable to write file", "Path", fullPath) + } + return err +} + +// copyInventory copies a file or directory from src to dst +func (i *InputDir) copyInventory(src string, dst string) error { + fs := afero.NewOsFs() + return afero.Walk(fs, src, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + fullDst := strings.Replace(path, src, dst, 1) + if info.IsDir() { + if err = fs.MkdirAll(fullDst, info.Mode()); err != nil { + return err + } + } else { + f, err := fs.Open(path) + if err != nil { + return err + } + if err = afero.WriteReader(fs, fullDst, f); err != nil { + return err + } + if err = fs.Chmod(fullDst, info.Mode()); err != nil { + return err + } + } + return nil + }) +} + +// Stdout reads the stdout from the ansible artifact that corresponds to the +// given ident and returns it as a string. +func (i *InputDir) Stdout(ident string) (string, error) { + errorPath := filepath.Join(i.Path, "artifacts", ident, "stdout") + errorText, err := os.ReadFile(errorPath) + return string(errorText), err +} + +// Write commits the object's state to the filesystem at i.Path. 
+func (i *InputDir) Write() error { + paramBytes, err := json.Marshal(i.Parameters) + if err != nil { + return err + } + envVarBytes, err := json.Marshal(i.EnvVars) + if err != nil { + return err + } + settingsBytes, err := json.Marshal(i.Settings) + if err != nil { + return err + } + + err = i.makeDirs() + if err != nil { + return err + } + + err = i.addFile("env/envvars", envVarBytes) + if err != nil { + return err + } + err = i.addFile("env/extravars", paramBytes) + if err != nil { + return err + } + err = i.addFile("env/settings", settingsBytes) + if err != nil { + return err + } + + // Trimming off the first and last characters if the command is wrapped by single quotations + if strings.HasPrefix(i.CmdLine, string("'")) && i.CmdLine[0] == i.CmdLine[len(i.CmdLine)-1] { + i.CmdLine = i.CmdLine[1 : len(i.CmdLine)-1] + } + + cmdLineBytes := []byte(i.CmdLine) + if len(cmdLineBytes) > 0 { + err = i.addFile("env/cmdline", cmdLineBytes) + if err != nil { + return err + } + } + + // ANSIBLE_INVENTORY takes precedence over our generated hosts file + // so if the envvar is set we don't bother making it, we just copy + // the inventory into our runner directory + ansibleInventory := os.Getenv("ANSIBLE_INVENTORY") + if ansibleInventory == "" { + // If ansible-runner is running in a python virtual environment, propagate + // that to ansible. 
+ venv := os.Getenv("VIRTUAL_ENV") + hosts := "localhost ansible_connection=local" + if venv != "" { + hosts = fmt.Sprintf("%s ansible_python_interpreter=%s", hosts, filepath.Join(venv, "bin", "python3")) + } else { + hosts = fmt.Sprintf("%s ansible_python_interpreter=%s", hosts, "{{ansible_playbook_python}}") + } + err = i.addFile("inventory/hosts", []byte(hosts)) + if err != nil { + return err + } + } else { + fi, err := os.Stat(ansibleInventory) + if err != nil { + return err + } + switch mode := fi.Mode(); { + case mode.IsDir(): + err = i.copyInventory(ansibleInventory, filepath.Join(i.Path, "inventory")) + if err != nil { + return err + } + case mode.IsRegular(): + err = i.copyInventory(ansibleInventory, filepath.Join(i.Path, "inventory/hosts")) + if err != nil { + return err + } + } + } + + if i.PlaybookPath != "" { + f, err := os.Open(i.PlaybookPath) + if err != nil { + log.Error(err, "Failed to open playbook file", "Path", i.PlaybookPath) + return err + } + defer func() { + if err := f.Close(); err != nil && !errors.Is(err, os.ErrClosed) { + log.Error(err, "Failed to close playbook file") + } + }() + + playbookBytes, err := io.ReadAll(f) + if err != nil { + return err + } + + err = i.addFile("project/playbook.yaml", playbookBytes) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/ansible/runner/runner.go b/internal/ansible/runner/runner.go new file mode 100644 index 0000000..2cfee42 --- /dev/null +++ b/internal/ansible/runner/runner.go @@ -0,0 +1,455 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runner + +import ( + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/metrics" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/paramconv" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/eventapi" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner/internal/inputdir" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/watches" +) + +var log = logf.Log.WithName("runner") + +const ( + // MaxRunnerArtifactsAnnotation - annotation used by a user to specify the max artifacts to keep + // in the runner directory. This will override the value provided by the watches file for a + // particular CR. Setting this to zero will cause all artifact directories to be kept. + // Example usage "ansible.sdk.operatorframework.io/max-runner-artifacts: 100" + MaxRunnerArtifactsAnnotation = "ansible.sdk.operatorframework.io/max-runner-artifacts" + + // AnsibleVerbosityAnnotation - annotation used by a user to specify the verbosity given + // to the ansible-runner command. This will override the value for a particular CR. 
+	// Example usage "ansible.sdk.operatorframework.io/verbosity: 5"
+	AnsibleVerbosityAnnotation = "ansible.sdk.operatorframework.io/verbosity"
+
+	ansibleRunnerBin = "ansible-runner"
+)
+
+// Runner - a runnable that should take the parameters and name and namespace
+// and run the correct code.
+type Runner interface {
+	Run(string, *unstructured.Unstructured, string) (RunResult, error)
+	GetFinalizer() (string, bool)
+}
+
+// ansibleVerbosityString will return the string with the -v* levels
+func ansibleVerbosityString(verbosity int) string {
+	if verbosity > 0 {
+		// the default verbosity is 0
+		// more info: https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-verbosity
+		return fmt.Sprintf("-%v", strings.Repeat("v", verbosity))
+	}
+	// Return default verbosity
+	return ""
+}
+
+type cmdFuncType func(ident, inputDirPath string, maxArtifacts, verbosity int) *exec.Cmd
+
+func playbookCmdFunc(path string) cmdFuncType {
+	return func(ident, inputDirPath string, maxArtifacts, verbosity int) *exec.Cmd {
+		cmdArgs := []string{"run", inputDirPath}
+		cmdOptions := []string{
+			"--rotate-artifacts", fmt.Sprintf("%v", maxArtifacts),
+			"-p", path,
+			"-i", ident,
+		}
+
+		// check the verbosity since the exec.Command will fail if an arg as "" or " " be informed
+		if verbosity > 0 {
+			cmdOptions = append(cmdOptions, ansibleVerbosityString(verbosity))
+		}
+		// use the ansibleRunnerBin const so the binary name stays consistent with the LookPath check in Run
+		return exec.Command(ansibleRunnerBin, append(cmdArgs, cmdOptions...)...)
+	}
+}
+
+func roleCmdFunc(path string) cmdFuncType {
+	rolePath, roleName := filepath.Split(path)
+	return func(ident, inputDirPath string, maxArtifacts, verbosity int) *exec.Cmd {
+		// check the verbosity since the exec.Command will fail if an arg as "" or " " be informed
+
+		cmdOptions := []string{
+			"--rotate-artifacts", fmt.Sprintf("%v", maxArtifacts),
+			"--role", roleName,
+			"--roles-path", rolePath,
+			"--hosts", "localhost",
+			"-i", ident,
+		}
+		cmdArgs := []string{"run", inputDirPath}
+
+		if verbosity > 0 {
+			cmdOptions = append(cmdOptions, ansibleVerbosityString(verbosity))
+		}
+		ansibleGathering := os.Getenv("ANSIBLE_GATHERING")
+
+		// When running a role directly, ansible-runner does not respect the ANSIBLE_GATHERING
+		// environment variable, so we need to skip fact collection manually
+		if ansibleGathering == "explicit" {
+			cmdOptions = append(cmdOptions, "--role-skip-facts")
+		}
+		// use the ansibleRunnerBin const so the binary name stays consistent with the LookPath check in Run
+		return exec.Command(ansibleRunnerBin, append(cmdArgs, cmdOptions...)...)
+	}
+}
+
+// New - creates a Runner from a Watch struct
+func New(watch watches.Watch, runnerArgs string) (Runner, error) {
+	var path string
+	var cmdFunc, finalizerCmdFunc cmdFuncType
+
+	err := watch.Validate()
+	if err != nil {
+		log.Error(err, "Failed to validate watch")
+		return nil, err
+	}
+
+	switch {
+	case watch.Playbook != "":
+		path = watch.Playbook
+		cmdFunc = playbookCmdFunc(path)
+	case watch.Role != "":
+		path = watch.Role
+		cmdFunc = roleCmdFunc(path)
+	}
+
+	// handle finalizer
+	switch {
+	case watch.Finalizer == nil:
+		finalizerCmdFunc = nil
+	case watch.Finalizer.Playbook != "":
+		finalizerCmdFunc = playbookCmdFunc(watch.Finalizer.Playbook)
+	case watch.Finalizer.Role != "":
+		finalizerCmdFunc = roleCmdFunc(watch.Finalizer.Role)
+	default:
+		finalizerCmdFunc = cmdFunc
+	}
+
+	return &runner{
+		Path:                path,
+		cmdFunc:             cmdFunc,
+		Vars:                watch.Vars,
+		Finalizer:           watch.Finalizer,
+		finalizerCmdFunc:    finalizerCmdFunc,
+		GVK:                 watch.GroupVersionKind,
+		maxRunnerArtifacts:
watch.MaxRunnerArtifacts, + ansibleVerbosity: watch.AnsibleVerbosity, + ansibleArgs: runnerArgs, + snakeCaseParameters: watch.SnakeCaseParameters, + markUnsafe: watch.MarkUnsafe, + }, nil +} + +// runner - implements the Runner interface for a GVK that's being watched. +type runner struct { + Path string // path on disk to a playbook or role depending on what cmdFunc expects + GVK schema.GroupVersionKind // GVK being watched that corresponds to the Path + Finalizer *watches.Finalizer + Vars map[string]interface{} + cmdFunc cmdFuncType // returns a Cmd that runs ansible-runner + finalizerCmdFunc cmdFuncType + maxRunnerArtifacts int + ansibleVerbosity int + snakeCaseParameters bool + markUnsafe bool + ansibleArgs string +} + +func (r *runner) Run(ident string, u *unstructured.Unstructured, kubeconfig string) (RunResult, error) { + if _, err := exec.LookPath(ansibleRunnerBin); err != nil { + return nil, err + } + + timer := metrics.ReconcileTimer(r.GVK.String()) + defer timer.ObserveDuration() + + if u.GetDeletionTimestamp() != nil && !r.isFinalizerRun(u) { + return nil, errors.New("resource has been deleted, but no finalizer was matched, skipping reconciliation") + } + logger := log.WithValues( + "job", ident, + "name", u.GetName(), + "namespace", u.GetNamespace(), + ) + + // start the event receiver. We'll check errChan for an error after + // ansible-runner exits. 
+ errChan := make(chan error, 1) + receiver, err := eventapi.New(ident, errChan) + if err != nil { + return nil, err + } + inputDir := inputdir.InputDir{ + Path: filepath.Join("/tmp/ansible-operator/runner/", r.GVK.Group, r.GVK.Version, r.GVK.Kind, + u.GetNamespace(), u.GetName()), + Parameters: r.makeParameters(u), + EnvVars: map[string]string{ + "K8S_AUTH_KUBECONFIG": kubeconfig, + "KUBECONFIG": kubeconfig, + }, + Settings: map[string]string{ + "runner_http_url": receiver.SocketPath, + "runner_http_path": receiver.URLPath, + }, + CmdLine: r.ansibleArgs, + } + // If Path is a dir, assume it is a role path. Otherwise assume it's a + // playbook path + fi, err := os.Lstat(r.Path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + inputDir.PlaybookPath = r.Path + } + err = inputDir.Write() + if err != nil { + return nil, err + } + maxArtifacts := r.maxRunnerArtifacts + if ma, ok := u.GetAnnotations()[MaxRunnerArtifactsAnnotation]; ok { + i, err := strconv.Atoi(ma) + if err != nil { + log.Info("Invalid max runner artifact annotation", "err", err, "value", ma) + } else { + maxArtifacts = i + } + } + + verbosity := r.ansibleVerbosity + if av, ok := u.GetAnnotations()[AnsibleVerbosityAnnotation]; ok { + i, err := strconv.Atoi(av) + if err != nil { + log.Info("Invalid ansible verbosity annotation", "err", err, "value", av) + } else { + verbosity = i + } + } + + go func() { + var dc *exec.Cmd + if r.isFinalizerRun(u) { + logger.V(1).Info("Resource is marked for deletion, running finalizer", + "Finalizer", r.Finalizer.Name) + dc = r.finalizerCmdFunc(ident, inputDir.Path, maxArtifacts, verbosity) + } else { + dc = r.cmdFunc(ident, inputDir.Path, maxArtifacts, verbosity) + } + // Append current environment since setting dc.Env to anything other than nil overwrites current env + dc.Env = append(dc.Env, os.Environ()...) 
+ dc.Env = append(dc.Env, fmt.Sprintf("K8S_AUTH_KUBECONFIG=%s", kubeconfig), + fmt.Sprintf("KUBECONFIG=%s", kubeconfig)) + + output, err := dc.CombinedOutput() + if err != nil { + logger.Error(err, string(output)) + } else { + logger.Info("Ansible-runner exited successfully") + } + + receiver.Close() + err = <-errChan + // http.Server returns this in the case of being closed cleanly + if err != nil && err != http.ErrServerClosed { + logger.Error(err, "Error from event API") + } + + // link the current run to the `latest` directory under artifacts + currentRun := filepath.Join(inputDir.Path, "artifacts", ident) + latestArtifacts := filepath.Join(inputDir.Path, "artifacts", "latest") + if _, err = os.Lstat(latestArtifacts); err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Error(err, "Latest artifacts dir has error") + return + } + } else if err = os.Remove(latestArtifacts); err != nil { + logger.Error(err, "Error removing the latest artifacts symlink") + return + } + + if err = os.Symlink(currentRun, latestArtifacts); err != nil { + logger.Error(err, "Error symlinking latest artifacts") + } + + }() + + return &runResult{ + events: receiver.Events, + inputDir: &inputDir, + ident: ident, + }, nil +} + +func (r *runner) isFinalizerRun(u *unstructured.Unstructured) bool { + finalizersSet := r.Finalizer != nil && u.GetFinalizers() != nil + // The resource is deleted and our finalizer is present, we need to run the finalizer + if finalizersSet && u.GetDeletionTimestamp() != nil { + for _, f := range u.GetFinalizers() { + if f == r.Finalizer.Name { + return true + } + } + } + return false +} + +// makeParameters - creates the extravars parameters for ansible +// The resulting structure in json is: +// +// { "ansible_operator_meta": { +// "name": , +// "namespace": , +// }, +// , +// , +// , +// __: { +// as is +// } +// ___spec: { +// as is +// } +// } +func (r *runner) makeParameters(u *unstructured.Unstructured) map[string]interface{} { + s := u.Object["spec"] 
+ spec, ok := s.(map[string]interface{}) + if !ok { + log.Info("Spec was not found for CR", "GroupVersionKind", u.GroupVersionKind(), + "Namespace", u.GetNamespace(), "Name", u.GetName()) + spec = map[string]interface{}{} + } + + parameters := map[string]interface{}{} + + if r.snakeCaseParameters { + parameters = paramconv.MapToSnake(spec) + } else { + for k, v := range spec { + parameters[k] = v + } + } + + if r.markUnsafe { + for key, val := range parameters { + parameters[key] = markUnsafe(val) + } + } + + parameters["ansible_operator_meta"] = map[string]string{"namespace": u.GetNamespace(), "name": u.GetName()} + + objKey := escapeAnsibleKey(fmt.Sprintf("_%v_%v", r.GVK.Group, strings.ToLower(r.GVK.Kind))) + parameters[objKey] = u.Object + + specKey := fmt.Sprintf("%s_spec", objKey) + parameters[specKey] = spec + + for k, v := range r.Vars { + parameters[k] = v + } + if r.isFinalizerRun(u) { + for k, v := range r.Finalizer.Vars { + parameters[k] = v + } + } + return parameters +} + +// markUnsafe recursively checks for string values and marks them unsafe. 
+// for eg: +// +// spec: +// key: "val" +// +// would be marked unsafe in JSON format as: +// +// spec: +// key: map{__ansible_unsafe:"val"} +func markUnsafe(values interface{}) interface{} { + switch v := values.(type) { + case []interface{}: + var p []interface{} + for _, n := range v { + p = append(p, markUnsafe(n)) + } + return p + case map[string]interface{}: + m := make(map[string]interface{}) + for k, v := range v { + m[k] = markUnsafe(v) + } + return m + case string: + return map[string]interface{}{"__ansible_unsafe": values} + default: + return values + } +} + +// escapeAnsibleKey - replaces characters that would result in an inaccessible Ansible parameter with underscores +// ie, _cert-manager.k8s.io would be converted to _cert_manager_k8s_io +func escapeAnsibleKey(key string) string { + disallowed := []string{".", "-"} + for _, c := range disallowed { + key = strings.ReplaceAll(key, c, "_") + } + return key +} + +func (r *runner) GetFinalizer() (string, bool) { + if r.Finalizer != nil { + return r.Finalizer.Name, true + } + return "", false +} + +// RunResult - result of a ansible run +type RunResult interface { + // Stdout returns the stdout from ansible-runner if it is available, else an error. + Stdout() (string, error) + // Events returns the events from ansible-runner if it is available, else an error. + Events() <-chan eventapi.JobEvent +} + +// RunResult facilitates access to information about a run of ansible. +type runResult struct { + // Events is a channel of events from ansible that contain state related + // to a run of ansible. + events <-chan eventapi.JobEvent + + ident string + inputDir *inputdir.InputDir +} + +// Stdout returns the stdout from ansible-runner if it is available, else an error. +func (r *runResult) Stdout() (string, error) { + return r.inputDir.Stdout(r.ident) +} + +// Events returns the events from ansible-runner if it is available, else an error. 
+func (r *runResult) Events() <-chan eventapi.JobEvent { + return r.events +} diff --git a/internal/ansible/runner/runner_test.go b/internal/ansible/runner/runner_test.go new file mode 100644 index 0000000..3a35e21 --- /dev/null +++ b/internal/ansible/runner/runner_test.go @@ -0,0 +1,373 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runner + +import ( + "os" + "os/exec" + "path/filepath" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/watches" +) + +func checkCmdFunc(t *testing.T, cmdFunc cmdFuncType, playbook, role string, verbosity int) { + ident := "test" + inputDirPath := "/test/path" + maxArtifacts := 1 + var expectedCmd, gotCmd *exec.Cmd + switch { + case playbook != "": + expectedCmd = playbookCmdFunc(playbook)(ident, inputDirPath, maxArtifacts, verbosity) + case role != "": + expectedCmd = roleCmdFunc(role)(ident, inputDirPath, maxArtifacts, verbosity) + } + + gotCmd = cmdFunc(ident, inputDirPath, maxArtifacts, verbosity) + + if expectedCmd.Path != gotCmd.Path { + t.Fatalf("Unexpected cmd path %v expected cmd path %v", gotCmd.Path, expectedCmd.Path) + } + + if !reflect.DeepEqual(expectedCmd.Args, gotCmd.Args) { + t.Fatalf("Unexpected cmd args %v expected cmd args %v", gotCmd.Args, expectedCmd.Args) + } +} + +func TestNew(t *testing.T) { + 
cwd, err := os.Getwd() + if err != nil { + t.Fatalf("Unable to get working director: %v", err) + } + validPlaybook := filepath.Join(cwd, "testdata", "playbook.yml") + validRole := filepath.Join(cwd, "testdata", "roles", "role") + testCases := []struct { + name string + gvk schema.GroupVersionKind + playbook string + role string + vars map[string]interface{} + finalizer *watches.Finalizer + desiredObjectKey string + }{ + { + name: "basic runner with playbook", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + playbook: validPlaybook, + }, + { + name: "basic runner with role", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + role: validRole, + }, + { + name: "basic runner with playbook + finalizer playbook", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + playbook: validPlaybook, + finalizer: &watches.Finalizer{ + Name: "operator.example.com/finalizer", + Playbook: validPlaybook, + }, + }, + { + name: "basic runner with role + finalizer role", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + role: validRole, + finalizer: &watches.Finalizer{ + Name: "operator.example.com/finalizer", + Role: validRole, + }, + }, + { + name: "basic runner with playbook + finalizer vars", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + playbook: validPlaybook, + finalizer: &watches.Finalizer{ + Name: "operator.example.com/finalizer", + Vars: map[string]interface{}{ + "state": "absent", + }, + }, + }, + { + name: "basic runner with playbook, vars + finalizer vars", + gvk: schema.GroupVersionKind{ + Group: "operator.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + playbook: validPlaybook, + vars: map[string]interface{}{ + "type": "this", + }, + finalizer: 
&watches.Finalizer{ + Name: "operator.example.com/finalizer", + Vars: map[string]interface{}{ + "state": "absent", + }, + }, + }, + { + name: "basic runner with a dash in the group name", + gvk: schema.GroupVersionKind{ + Group: "operator-with-dash.example.com", + Version: "v1alpha1", + Kind: "Example", + }, + playbook: validPlaybook, + desiredObjectKey: "_operator_with_dash_example_com_example", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testWatch := watches.New(tc.gvk, tc.role, tc.playbook, tc.vars, tc.finalizer) + + testRunner, err := New(*testWatch, "") + if err != nil { + t.Fatalf("Error occurred unexpectedly: %v", err) + } + testRunnerStruct, ok := testRunner.(*runner) + if !ok { + t.Fatalf("Error occurred unexpectedly: %v", err) + } + + switch { + case testWatch.Playbook != "": + if testRunnerStruct.Path != testWatch.Playbook { + t.Fatalf("Unexpected path %v expected path %v", testRunnerStruct.Path, testWatch.Playbook) + } + case testWatch.Role != "": + if testRunnerStruct.Path != testWatch.Role { + t.Fatalf("Unexpected path %v expected path %v", testRunnerStruct.Path, testWatch.Role) + } + } + + // check that the group + kind are properly formatted into a parameter + if tc.desiredObjectKey != "" { + parameters := testRunnerStruct.makeParameters(&unstructured.Unstructured{}) + if _, ok := parameters[tc.desiredObjectKey]; !ok { + t.Fatalf("Did not find expected objKey %v in parameters %+v", tc.desiredObjectKey, parameters) + } + + } + + if testRunnerStruct.GVK != testWatch.GroupVersionKind { + t.Fatalf("Unexpected GVK %v expected GVK %v", testRunnerStruct.GVK, testWatch.GroupVersionKind) + } + + if testRunnerStruct.maxRunnerArtifacts != testWatch.MaxRunnerArtifacts { + t.Fatalf("Unexpected maxRunnerArtifacts %v expected maxRunnerArtifacts %v", + testRunnerStruct.maxRunnerArtifacts, testWatch.MaxRunnerArtifacts) + } + + // Check the cmdFunc + checkCmdFunc(t, testRunnerStruct.cmdFunc, testWatch.Playbook, testWatch.Role, 
testWatch.AnsibleVerbosity) + + // Check finalizer + if testRunnerStruct.Finalizer != testWatch.Finalizer { + t.Fatalf("Unexpected finalizer %v expected finalizer %v", testRunnerStruct.Finalizer, + testWatch.Finalizer) + } + + if testWatch.Finalizer != nil { + if testRunnerStruct.Finalizer.Name != testWatch.Finalizer.Name { + t.Fatalf("Unexpected finalizer name %v expected finalizer name %v", + testRunnerStruct.Finalizer.Name, testWatch.Finalizer.Name) + } + + if len(testWatch.Finalizer.Vars) == 0 { + checkCmdFunc(t, testRunnerStruct.cmdFunc, testWatch.Finalizer.Playbook, testWatch.Finalizer.Role, + testWatch.AnsibleVerbosity) + } else { + // when finalizer vars is set the finalizerCmdFunc should be the same as the cmdFunc + checkCmdFunc(t, testRunnerStruct.finalizerCmdFunc, testWatch.Playbook, testWatch.Role, + testWatch.AnsibleVerbosity) + } + } + }) + } +} + +func TestAnsibleVerbosityString(t *testing.T) { + testCases := []struct { + verbosity int + expectedString string + }{ + {verbosity: -1, expectedString: ""}, + {verbosity: 0, expectedString: ""}, + {verbosity: 1, expectedString: "-v"}, + {verbosity: 2, expectedString: "-vv"}, + {verbosity: 7, expectedString: "-vvvvvvv"}, + } + + for _, tc := range testCases { + gotString := ansibleVerbosityString(tc.verbosity) + if tc.expectedString != gotString { + t.Fatalf("Unexpected string %v for expected %v from verbosity %v", gotString, tc.expectedString, tc.verbosity) + } + } +} + +func TestMakeParameters(t *testing.T) { + var ( + inputSpec = "testKey" + ) + + testCases := []struct { + name string + inputParams unstructured.Unstructured + expectedSafeParams interface{} + }{ + { + name: "should mark values passed as string unsafe", + inputParams: unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + inputSpec: "testVal", + }, + }, + }, + expectedSafeParams: map[string]interface{}{ + "__ansible_unsafe": "testVal", + }, + }, + { + name: "should not mark integers unsafe", + 
inputParams: unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + inputSpec: 3, + }, + }, + }, + expectedSafeParams: 3, + }, + { + name: "should recursively mark values in dictionary as unsafe", + inputParams: unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + inputSpec: map[string]interface{}{ + "testsubKey1": "val1", + "testsubKey2": "val2", + }, + }, + }, + }, + expectedSafeParams: map[string]interface{}{ + "testsubKey1": map[string]interface{}{ + "__ansible_unsafe": "val1", + }, + "testsubKey2": map[string]interface{}{ + "__ansible_unsafe": "val2", + }, + }, + }, + { + name: "should recursively mark values in list as unsafe", + inputParams: unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + inputSpec: []interface{}{ + "testVal1", + "testVal2", + }, + }, + }, + }, + expectedSafeParams: []interface{}{ + map[string]interface{}{ + "__ansible_unsafe": "testVal1", + }, + map[string]interface{}{ + "__ansible_unsafe": "testVal2", + }, + }, + }, + { + name: "should recursively mark values in list/dict as unsafe", + inputParams: unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + inputSpec: []interface{}{ + "testVal1", + "testVal2", + map[string]interface{}{ + "testVal3": 3, + "testVal4": "__^&{__)", + }, + }, + }, + }, + }, + expectedSafeParams: []interface{}{ + map[string]interface{}{ + "__ansible_unsafe": "testVal1", + }, + map[string]interface{}{ + "__ansible_unsafe": "testVal2", + }, + map[string]interface{}{ + "testVal3": 3, + "testVal4": map[string]interface{}{ + "__ansible_unsafe": "__^&{__)", + }, + }, + }, + }, + } + + for _, tc := range testCases { + testRunner := runner{ + markUnsafe: true, + } + parameters := testRunner.makeParameters(&tc.inputParams) + + val, ok := parameters[inputSpec] + if !ok { + t.Fatalf("Error occurred, value %s in spec is missing", inputSpec) + } else { + 
eq := reflect.DeepEqual(val, tc.expectedSafeParams) + if !eq { + t.Errorf("Error occurred, parameters %v are not marked unsafe", val) + } + } + } +} diff --git a/internal/ansible/runner/testdata/playbook.yml b/internal/ansible/runner/testdata/playbook.yml new file mode 100644 index 0000000..e69de29 diff --git a/internal/ansible/runner/testdata/roles/role/tasks.yaml b/internal/ansible/runner/testdata/roles/role/tasks.yaml new file mode 100644 index 0000000..e69de29 diff --git a/internal/ansible/watches/testdata/ansible_collections/nameSpace/collection/roles/someRole/empty_file b/internal/ansible/watches/testdata/ansible_collections/nameSpace/collection/roles/someRole/empty_file new file mode 100644 index 0000000..e69de29 diff --git a/internal/ansible/watches/testdata/duplicate_gvk.yaml b/internal/ansible/watches/testdata/duplicate_gvk.yaml new file mode 100644 index 0000000..ce345bf --- /dev/null +++ b/internal/ansible/watches/testdata/duplicate_gvk.yaml @@ -0,0 +1,17 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid.yaml b/internal/ansible/watches/testdata/invalid.yaml new file mode 100644 index 0000000..110371c --- /dev/null +++ b/internal/ansible/watches/testdata/invalid.yaml @@ -0,0 +1,9 @@ +--- +version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid_collection.yaml b/internal/ansible/watches/testdata/invalid_collection.yaml new file mode 100644 index 0000000..b172198 --- /dev/null +++ 
b/internal/ansible/watches/testdata/invalid_collection.yaml @@ -0,0 +1,5 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: SanityUnconfirmed + role: nameSpace.collection.someRole diff --git a/internal/ansible/watches/testdata/invalid_duration.yaml b/internal/ansible/watches/testdata/invalid_duration.yaml new file mode 100644 index 0000000..f5b7ab1 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_duration.yaml @@ -0,0 +1,6 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + reconcilePeriod: invalid diff --git a/internal/ansible/watches/testdata/invalid_finalizer_no_vars.yaml b/internal/ansible/watches/testdata/invalid_finalizer_no_vars.yaml new file mode 100644 index 0000000..bade2a3 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_finalizer_no_vars.yaml @@ -0,0 +1,7 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: foo.app.example.com/finalizer diff --git a/internal/ansible/watches/testdata/invalid_finalizer_playbook_path.yaml b/internal/ansible/watches/testdata/invalid_finalizer_playbook_path.yaml new file mode 100644 index 0000000..66d8775 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_finalizer_playbook_path.yaml @@ -0,0 +1,10 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: app.example.com/finalizer + playbook: playbook.yaml + vars: + sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid_finalizer_role_path.yaml b/internal/ansible/watches/testdata/invalid_finalizer_role_path.yaml new file mode 100644 index 0000000..a060604 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_finalizer_role_path.yaml @@ -0,0 +1,10 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: app.example.com/finalizer + role: ansible/role + vars: + 
sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid_finalizer_whithout_name.yaml b/internal/ansible/watches/testdata/invalid_finalizer_whithout_name.yaml new file mode 100644 index 0000000..0728af6 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_finalizer_whithout_name.yaml @@ -0,0 +1,8 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + finalizer: + name: + diff --git a/internal/ansible/watches/testdata/invalid_playbook_path.yaml b/internal/ansible/watches/testdata/invalid_playbook_path.yaml new file mode 100644 index 0000000..9966a3a --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_playbook_path.yaml @@ -0,0 +1,9 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: invalid/playbook.yaml + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid_role_path.yaml b/internal/ansible/watches/testdata/invalid_role_path.yaml new file mode 100644 index 0000000..8fb5d34 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_role_path.yaml @@ -0,0 +1,9 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + role: opt/ansible/playbook.yaml + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running diff --git a/internal/ansible/watches/testdata/invalid_status.yaml b/internal/ansible/watches/testdata/invalid_status.yaml new file mode 100644 index 0000000..3ba502c --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_status.yaml @@ -0,0 +1,6 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: Database + playbook: playbook.yaml + watches: invalid diff --git a/internal/ansible/watches/testdata/invalid_yaml_file.yaml b/internal/ansible/watches/testdata/invalid_yaml_file.yaml new file mode 100644 index 0000000..9759ac2 --- /dev/null +++ b/internal/ansible/watches/testdata/invalid_yaml_file.yaml @@ -0,0 
+1,3 @@ +--- +invalid file layout + diff --git a/internal/ansible/watches/testdata/playbook.yml b/internal/ansible/watches/testdata/playbook.yml new file mode 100644 index 0000000..e69de29 diff --git a/internal/ansible/watches/testdata/roles/role/tasks.yaml b/internal/ansible/watches/testdata/roles/role/tasks.yaml new file mode 100644 index 0000000..e69de29 diff --git a/internal/ansible/watches/testdata/valid.yaml.tmpl b/internal/ansible/watches/testdata/valid.yaml.tmpl new file mode 100644 index 0000000..273dbc6 --- /dev/null +++ b/internal/ansible/watches/testdata/valid.yaml.tmpl @@ -0,0 +1,124 @@ +--- +- version: v1alpha1 + group: app.example.com + kind: NoFinalizer + playbook: {{ .ValidPlaybook }} + reconcilePeriod: 2s +- version: v1alpha1 + group: app.example.com + kind: WithUnsafeMarked + playbook: {{ .ValidPlaybook }} + reconcilePeriod: 2s + markUnsafe: True +- version: v1alpha1 + group: app.example.com + kind: Playbook + playbook: {{ .ValidPlaybook }} + finalizer: + name: app.example.com/finalizer + role: {{ .ValidRole }} + vars: + sentinel: finalizer_running +- version: v1alpha1 + group: app.example.com + kind: WatchClusterScoped + playbook: {{ .ValidPlaybook }} + reconcilePeriod: 2s + watchClusterScopedResources: true +- version: v1alpha1 + group: app.example.com + kind: NoReconcile + playbook: {{ .ValidPlaybook }} + reconcilePeriod: 0s +- version: v1alpha1 + group: app.example.com + kind: DefaultStatus + playbook: {{ .ValidPlaybook }} +- version: v1alpha1 + group: app.example.com + kind: DisableStatus + playbook: {{ .ValidPlaybook }} + manageStatus: False +- version: v1alpha1 + group: app.example.com + kind: EnableStatus + playbook: {{ .ValidPlaybook }} + manageStatus: True +- version: v1alpha1 + group: app.example.com + kind: Role + role: {{ .ValidRole }} + finalizer: + name: app.example.com/finalizer + playbook: {{ .ValidPlaybook }} + vars: + sentinel: finalizer_running +- version: v1alpha1 + group: app.example.com + kind: FinalizerRole + role: {{ 
.ValidRole }} + finalizer: + name: app.example.com/finalizer + vars: + sentinel: finalizer_running +- version: v1alpha1 + group: app.example.com + kind: MaxConcurrentReconcilesDefault + role: {{ .ValidRole }} +- version: v1alpha1 + group: app.example.com + kind: MaxConcurrentReconcilesIgnored + role: {{ .ValidRole }} + maxWorkers: 5 +- version: v1alpha1 + group: app.example.com + kind: MaxConcurrentReconcilesEnv + role: {{ .ValidRole }} +- version: v1alpha1 + group: app.example.com + kind: AnsibleVerbosityDefault + role: {{ .ValidRole }} +- version: v1alpha1 + group: app.example.com + kind: AnsibleVerbosityIgnored + role: {{ .ValidRole }} + ansibleVerbosity: 5 +- version: v1alpha1 + group: app.example.com + kind: AnsibleVerbosityEnv + role: {{ .ValidRole }} +- version: v1alpha1 + group: app.example.com + kind: WatchWithVars + role: {{ .ValidRole }} + vars: + sentinel: reconciling +- version: v1alpha1 + group: app.example.com + kind: AnsibleCollectionEnvTest + role: nameSpace.collection.someRole +- version: v1alpha1 + group: app.example.com + kind: AnsibleBlacklistTest + manageStatus: True + role: {{ .ValidRole }} + blacklist: + - version: "v1alpha1.1" + group: "app.example.com/1" + kind: "AnsibleBlacklistTest_1" + - version: "v1alpha1.2" + group: "app.example.com/2" + kind: "AnsibleBlacklistTest_2" + - version: "v1alpha1.3" + group: "app.example.com/3" + kind: "AnsibleBlacklistTest_3" +- version: "v1alpha1" + group: "app.example.com" + kind: "AnsibleSelectorTest" + manageStatus: True + role: {{ .ValidRole }} + selector: + matchLabels: + matchLabel_1: matchLabel_1 + matchExpressions: + - {key: matchexpression_key, operator: matchexpression_operator, values: [value1,value2]} diff --git a/internal/ansible/watches/watches.go b/internal/ansible/watches/watches.go new file mode 100644 index 0000000..5a3bd74 --- /dev/null +++ b/internal/ansible/watches/watches.go @@ -0,0 +1,484 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package watches provides the structures and functions for mapping a +// GroupVersionKind to an Ansible playbook or role. +package watches + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + logf "sigs.k8s.io/controller-runtime/pkg/log" + yaml "sigs.k8s.io/yaml" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/flags" +) + +var log = logf.Log.WithName("watches") + +// Watch - holds data used to create a mapping of GVK to ansible playbook or role. +// The mapping is used to compose an ansible operator. 
+type Watch struct { + GroupVersionKind schema.GroupVersionKind `yaml:",inline"` + Blacklist []schema.GroupVersionKind `yaml:"blacklist"` + Playbook string `yaml:"playbook"` + Role string `yaml:"role"` + Vars map[string]interface{} `yaml:"vars"` + MaxRunnerArtifacts int `yaml:"maxRunnerArtifacts"` + ReconcilePeriod metav1.Duration `yaml:"reconcilePeriod"` + Finalizer *Finalizer `yaml:"finalizer"` + ManageStatus bool `yaml:"manageStatus"` + WatchDependentResources bool `yaml:"watchDependentResources"` + WatchClusterScopedResources bool `yaml:"watchClusterScopedResources"` + SnakeCaseParameters bool `yaml:"snakeCaseParameters"` + WatchAnnotationsChanges bool `yaml:"watchAnnotationsChanges"` + MarkUnsafe bool `yaml:"markUnsafe"` + Selector metav1.LabelSelector `yaml:"selector"` + + // Not configurable via watches.yaml + MaxConcurrentReconciles int `yaml:"-"` + AnsibleVerbosity int `yaml:"-"` +} + +// Finalizer - Expose finalizer to be used by a user. +type Finalizer struct { + Name string `yaml:"name"` + Playbook string `yaml:"playbook"` + Role string `yaml:"role"` + Vars map[string]interface{} `yaml:"vars"` +} + +// Default values for optional fields on Watch +var ( + blacklistDefault = []schema.GroupVersionKind{} + maxRunnerArtifactsDefault = 20 + reconcilePeriodDefault = metav1.Duration{Duration: time.Duration(0)} + manageStatusDefault = true + watchDependentResourcesDefault = true + watchClusterScopedResourcesDefault = false + snakeCaseParametersDefault = true + watchAnnotationsChangesDefault = false + markUnsafeDefault = false + selectorDefault = metav1.LabelSelector{} + + // these are overridden by cmdline flags + maxConcurrentReconcilesDefault = runtime.NumCPU() + ansibleVerbosityDefault = 2 +) + +// Use an alias struct to handle complex types +type alias struct { + Group string `yaml:"group"` + Version string `yaml:"version"` + Kind string `yaml:"kind"` + Playbook string `yaml:"playbook"` + Role string `yaml:"role"` + Vars map[string]interface{} `yaml:"vars"` 
+ MaxRunnerArtifacts int `yaml:"maxRunnerArtifacts"` + ReconcilePeriod *metav1.Duration `yaml:"reconcilePeriod,omitempty"` + ManageStatus *bool `yaml:"manageStatus,omitempty"` + WatchDependentResources *bool `yaml:"watchDependentResources,omitempty"` + WatchClusterScopedResources *bool `yaml:"watchClusterScopedResources,omitempty"` + SnakeCaseParameters *bool `yaml:"snakeCaseParameters"` + WatchAnnotationsChanges *bool `yaml:"watchAnnotationsChanges"` + MarkUnsafe *bool `yaml:"markUnsafe"` + Blacklist []schema.GroupVersionKind `yaml:"blacklist,omitempty"` + Finalizer *Finalizer `yaml:"finalizer"` + Selector metav1.LabelSelector `yaml:"selector"` +} + +// buildWatch will build Watch based on the values parsed from alias +func (w *Watch) setValuesFromAlias(tmp alias) error { + // by default, the operator will manage status and watch dependent resources + if tmp.ManageStatus == nil { + tmp.ManageStatus = &manageStatusDefault + } + // the operator will not manage cluster scoped resources by default. 
+ if tmp.WatchDependentResources == nil { + tmp.WatchDependentResources = &watchDependentResourcesDefault + } + if tmp.MaxRunnerArtifacts == 0 { + tmp.MaxRunnerArtifacts = maxRunnerArtifactsDefault + } + + if tmp.ReconcilePeriod == nil { + tmp.ReconcilePeriod = &reconcilePeriodDefault + } + + if tmp.WatchClusterScopedResources == nil { + tmp.WatchClusterScopedResources = &watchClusterScopedResourcesDefault + } + + if tmp.Blacklist == nil { + tmp.Blacklist = blacklistDefault + } + + if tmp.SnakeCaseParameters == nil { + tmp.SnakeCaseParameters = &snakeCaseParametersDefault + } + + if tmp.WatchAnnotationsChanges == nil { + tmp.WatchAnnotationsChanges = &watchAnnotationsChangesDefault + } + + if tmp.MarkUnsafe == nil { + tmp.MarkUnsafe = &markUnsafeDefault + } + + gvk := schema.GroupVersionKind{ + Group: tmp.Group, + Version: tmp.Version, + Kind: tmp.Kind, + } + err := verifyGVK(gvk) + if err != nil { + return fmt.Errorf("invalid GVK: %s: %w", gvk, err) + } + + // Rewrite values to struct being unmarshalled + w.GroupVersionKind = gvk + w.Playbook = tmp.Playbook + w.Role = tmp.Role + w.Vars = tmp.Vars + w.MaxRunnerArtifacts = tmp.MaxRunnerArtifacts + w.MaxConcurrentReconciles = getMaxConcurrentReconciles(gvk, maxConcurrentReconcilesDefault) + w.ReconcilePeriod = *tmp.ReconcilePeriod + w.ManageStatus = *tmp.ManageStatus + w.WatchDependentResources = *tmp.WatchDependentResources + w.SnakeCaseParameters = *tmp.SnakeCaseParameters + w.WatchAnnotationsChanges = *tmp.WatchAnnotationsChanges + w.MarkUnsafe = *tmp.MarkUnsafe + w.WatchClusterScopedResources = *tmp.WatchClusterScopedResources + w.Finalizer = tmp.Finalizer + w.AnsibleVerbosity = getAnsibleVerbosity(gvk, ansibleVerbosityDefault) + w.Blacklist = tmp.Blacklist + + wd, err := os.Getwd() + if err != nil { + return err + } + w.addRolePlaybookPaths(wd) + w.Selector = tmp.Selector + + return nil +} + +// addRolePlaybookPaths will add the full path based on the current dir +func (w *Watch) addRolePlaybookPaths(rootDir 
string) { + if len(w.Playbook) > 0 { + w.Playbook = getFullPath(rootDir, w.Playbook) + } + + if len(w.Role) > 0 { + possibleRolePaths := getPossibleRolePaths(rootDir, w.Role) + for _, possiblePath := range possibleRolePaths { + if _, err := os.Stat(possiblePath); err == nil { + w.Role = possiblePath + break + } + } + } + if w.Finalizer != nil && len(w.Finalizer.Role) > 0 { + possibleRolePaths := getPossibleRolePaths(rootDir, w.Finalizer.Role) + for _, possiblePath := range possibleRolePaths { + if _, err := os.Stat(possiblePath); err == nil { + w.Finalizer.Role = possiblePath + break + } + } + } + if w.Finalizer != nil && len(w.Finalizer.Playbook) > 0 { + w.Finalizer.Playbook = getFullPath(rootDir, w.Finalizer.Playbook) + } +} + +// getFullPath returns an absolute path for the playbook +func getFullPath(rootDir, path string) string { + if len(path) > 0 && !filepath.IsAbs(path) { + return filepath.Join(rootDir, path) + } + return path +} + +// getPossibleRolePaths returns list of possible absolute paths derived from a user provided value. +func getPossibleRolePaths(rootDir, path string) []string { + possibleRolePaths := []string{} + if filepath.IsAbs(path) || len(path) == 0 { + return append(possibleRolePaths, path) + } + fqcn := strings.Split(path, ".") + // If fqcn is a valid fully qualified collection name, it is .. 
+ if len(fqcn) == 3 { + ansibleCollectionsPathEnv, ok := os.LookupEnv(flags.AnsibleCollectionsPathEnvVar) + if !ok || len(ansibleCollectionsPathEnv) == 0 { + ansibleCollectionsPathEnv = "/usr/share/ansible/collections" + home, err := os.UserHomeDir() + if err == nil { + homeCollections := filepath.Join(home, ".ansible/collections") + ansibleCollectionsPathEnv = ansibleCollectionsPathEnv + ":" + homeCollections + } + } + for _, possiblePathParent := range strings.Split(ansibleCollectionsPathEnv, ":") { + possiblePath := filepath.Join(possiblePathParent, "ansible_collections", fqcn[0], fqcn[1], "roles", fqcn[2]) + possibleRolePaths = append(possibleRolePaths, possiblePath) + } + } + + // Check for the role where Ansible would. If it exists, use it. + ansibleRolesPathEnv, ok := os.LookupEnv(flags.AnsibleRolesPathEnvVar) + if ok && len(ansibleRolesPathEnv) > 0 { + for _, possiblePathParent := range strings.Split(ansibleRolesPathEnv, ":") { + // "roles" is optionally a part of the path. Check with, and without. + possibleRolePaths = append(possibleRolePaths, filepath.Join(possiblePathParent, path)) + possibleRolePaths = append(possibleRolePaths, filepath.Join(possiblePathParent, "roles", path)) + } + } + // Roles can also live in the current working directory. 
+ return append(possibleRolePaths, getFullPath(rootDir, filepath.Join("roles", path))) +} + +// Validate - ensures that a Watch is valid +// A Watch is considered valid if it: +// - Specifies a valid path to a Role||Playbook +// - If a Finalizer is non-nil, it must have a name + valid path to a Role||Playbook or Vars +func (w *Watch) Validate() error { + err := verifyAnsiblePath(w.Playbook, w.Role) + if err != nil { + log.Error(err, fmt.Sprintf("Invalid ansible path for GVK: %v", w.GroupVersionKind.String())) + return err + } + + if w.Finalizer != nil { + if w.Finalizer.Name == "" { + err = fmt.Errorf("finalizer must have name") + log.Error(err, fmt.Sprintf("Invalid finalizer for GVK: %v", w.GroupVersionKind.String())) + return err + } + // only fail if Vars not set + err = verifyAnsiblePath(w.Finalizer.Playbook, w.Finalizer.Role) + if err != nil && len(w.Finalizer.Vars) == 0 { + log.Error(err, fmt.Sprintf("Invalid ansible path on Finalizer for GVK: %v", + w.GroupVersionKind.String())) + return err + } + } + + return nil +} + +// New - returns a Watch with sensible defaults. 
+func New(gvk schema.GroupVersionKind, role, playbook string, vars map[string]interface{}, finalizer *Finalizer) *Watch { + return &Watch{ + Blacklist: blacklistDefault, + GroupVersionKind: gvk, + Playbook: playbook, + Role: role, + Vars: vars, + MaxRunnerArtifacts: maxRunnerArtifactsDefault, + MaxConcurrentReconciles: maxConcurrentReconcilesDefault, + ReconcilePeriod: reconcilePeriodDefault, + ManageStatus: manageStatusDefault, + WatchDependentResources: watchDependentResourcesDefault, + WatchClusterScopedResources: watchClusterScopedResourcesDefault, + SnakeCaseParameters: snakeCaseParametersDefault, + WatchAnnotationsChanges: watchAnnotationsChangesDefault, + MarkUnsafe: markUnsafeDefault, + Finalizer: finalizer, + AnsibleVerbosity: ansibleVerbosityDefault, + Selector: selectorDefault, + } +} + +// Load - loads a slice of Watches from the watches file from the CLI +func Load(path string, maxReconciler, ansibleVerbosity int) ([]Watch, error) { + maxConcurrentReconcilesDefault = maxReconciler + ansibleVerbosityDefault = ansibleVerbosity + b, err := os.ReadFile(path) + if err != nil { + log.Error(err, "Failed to get config file") + return nil, err + } + + // First unmarshal into a slice of aliases. + alias := []alias{} + err = yaml.Unmarshal(b, &alias) + if err != nil { + log.Error(err, "Failed to unmarshal config") + return nil, err + } + + // Create one Watch per alias in aliases. 
+ + watches := []Watch{} + for _, tmp := range alias { + w := Watch{} + err = w.setValuesFromAlias(tmp) + if err != nil { + return nil, err + } + watches = append(watches, w) + } + + watchesMap := make(map[schema.GroupVersionKind]bool) + for _, watch := range watches { + // prevent dupes + if _, ok := watchesMap[watch.GroupVersionKind]; ok { + return nil, fmt.Errorf("duplicate GVK: %v", watch.GroupVersionKind.String()) + } + + watchesMap[watch.GroupVersionKind] = true + + err = watch.Validate() + if err != nil { + log.Error(err, fmt.Sprintf("Watch with GVK %v failed validation", watch.GroupVersionKind.String())) + return nil, err + } + } + + return watches, nil +} + +// verify that a given GroupVersionKind has a Version and Kind +// A GVK without a group is valid. Certain scenarios may cause a GVK +// without a group to fail in other ways later in the initialization +// process. +func verifyGVK(gvk schema.GroupVersionKind) error { + if gvk.Version == "" { + return errors.New("version must not be empty") + } + if gvk.Kind == "" { + return errors.New("kind must not be empty") + } + return nil +} + +// verify that a valid path is specified for a given role or playbook +func verifyAnsiblePath(playbook string, role string) error { + switch { + case playbook != "": + if _, err := os.Stat(playbook); err != nil { + return fmt.Errorf("playbook: %v was not found", playbook) + } + case role != "": + if _, err := os.Stat(role); err != nil { + return fmt.Errorf("role: %v was not found", role) + } + default: + return fmt.Errorf("must specify Role or Playbook") + } + return nil +} + +// if the WORKER_* environment variable is set, use that value. +// Otherwise, use defValue. This is definitely +// counter-intuitive but it allows the operator admin adjust the +// number of workers based on their cluster resources. While the +// author may use the CLI option to specify a suggested +// configuration for the operator. 
+func getMaxConcurrentReconciles(gvk schema.GroupVersionKind, defValue int) int { + envVarMaxWorker := strings.ToUpper(strings.ReplaceAll( + fmt.Sprintf("WORKER_%s_%s", gvk.Kind, gvk.Group), + ".", + "_", + )) + envVarMaxReconciler := strings.ToUpper(strings.ReplaceAll( + fmt.Sprintf("MAX_CONCURRENT_RECONCILES_%s_%s", gvk.Kind, gvk.Group), + ".", + "_", + )) + envVal := getIntegerEnvMaxReconcile(envVarMaxWorker, envVarMaxReconciler, defValue) + if envVal <= 0 { + log.Info("Value %v not valid. Using default %v", envVal, defValue) + return defValue + } + return envVal +} + +// if the ANSIBLE_VERBOSITY_* environment variable is set, use that value. +// Otherwise, use defValue. +func getAnsibleVerbosity(gvk schema.GroupVersionKind, defValue int) int { + envVar := strings.ToUpper(strings.Replace( + fmt.Sprintf("ANSIBLE_VERBOSITY_%s_%s", gvk.Kind, gvk.Group), + ".", + "_", + -1, + )) + ansibleVerbosity := getIntegerEnvWithDefault(envVar, defValue) + // Use default value when value doesn't make sense + if ansibleVerbosity < 0 { + log.Info("Value %v not valid. Using default %v", ansibleVerbosity, defValue) + return defValue + } + if ansibleVerbosity > 7 { + log.Info("Value %v not valid. Using default %v", ansibleVerbosity, defValue) + return defValue + } + return ansibleVerbosity +} + +// getIntegerEnvWithDefault returns value for MaxWorkers/Ansibleverbosity based on if envVar is set +// sor a defvalue is used. 
+func getIntegerEnvWithDefault(envVar string, defValue int) int { + val := defValue + if envVal, ok := os.LookupEnv(envVar); ok { + if i, err := strconv.Atoi(envVal); err != nil { + log.Info("Could not parse environment variable as an integer; using default value", + "envVar", envVar, "default", defValue) + } else { + val = i + } + } else if !ok { + log.Info("Environment variable not set; using default value", "envVar", envVar, + "default", defValue) + } + return val +} + +// getIntegerEnvMaxReconcile looks for global variable "MAX_CONCURRENT_RECONCILES__", +// if not present it checks for "WORKER__" and logs deprecation message +// if required. If both of them are not set, we use the default value passed on by command line +// flags. +func getIntegerEnvMaxReconcile(envVarMaxWorker, envVarMaxReconciler string, defValue int) int { + val := defValue + if envValRecon, ok := os.LookupEnv(envVarMaxReconciler); ok { + if i, err := strconv.Atoi(envValRecon); err != nil { + log.Info("Could not parse environment variable as an integer; using default value", + "envVar", envVarMaxReconciler, "default", defValue) + } else { + val = i + } + } else if !ok { + if envValWorker, ok := os.LookupEnv(envVarMaxWorker); ok { + deprecationMsg := fmt.Sprintf("Environment variable %s is deprecated, use %s instead", envVarMaxWorker, envVarMaxReconciler) + log.Info(deprecationMsg) + if i, err := strconv.Atoi(envValWorker); err != nil { + log.Info("Could not parse environment variable as an integer; using default value", + "envVar", envVarMaxWorker, "default", defValue) + } else { + val = i + } + } + } + return val + +} diff --git a/internal/ansible/watches/watches_test.go b/internal/ansible/watches/watches_test.go new file mode 100644 index 0000000..6b75d8a --- /dev/null +++ b/internal/ansible/watches/watches_test.go @@ -0,0 +1,894 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package watches + +import ( + "fmt" + "html/template" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestNew(t *testing.T) { + basicGVK := schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "Example", + } + testCases := []struct { + name string + gvk schema.GroupVersionKind + role string + playbook string + vars map[string]interface{} + finalizer *Finalizer + shouldValidate bool + }{ + { + name: "default invalid watch", + gvk: basicGVK, + shouldValidate: false, + }, + } + + expectedReconcilePeriod, _ := time.ParseDuration(reconcilePeriodDefault.String()) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + watch := New(tc.gvk, tc.role, tc.playbook, tc.vars, tc.finalizer) + if watch.GroupVersionKind != tc.gvk { + t.Fatalf("Unexpected GVK %v expected %v", watch.GroupVersionKind, tc.gvk) + } + if watch.MaxRunnerArtifacts != maxRunnerArtifactsDefault { + t.Fatalf("Unexpected maxRunnerArtifacts %v expected %v", watch.MaxRunnerArtifacts, + maxRunnerArtifactsDefault) + } + if watch.MaxConcurrentReconciles != maxConcurrentReconcilesDefault { + t.Fatalf("Unexpected maxConcurrentReconciles %v expected %v", watch.MaxConcurrentReconciles, + maxConcurrentReconcilesDefault) + } + if watch.ReconcilePeriod.Duration != expectedReconcilePeriod { + t.Fatalf("Unexpected reconcilePeriod %v expected %v", watch.ReconcilePeriod, + 
expectedReconcilePeriod) + } + if watch.ManageStatus != manageStatusDefault { + t.Fatalf("Unexpected manageStatus %v expected %v", watch.ManageStatus, &manageStatusDefault) + } + if watch.WatchDependentResources != watchDependentResourcesDefault { + t.Fatalf("Unexpected watchDependentResources %v expected %v", watch.WatchDependentResources, + watchDependentResourcesDefault) + } + if watch.SnakeCaseParameters != snakeCaseParametersDefault { + t.Fatalf("Unexpected snakeCaseParameters %v expected %v", watch.SnakeCaseParameters, + snakeCaseParametersDefault) + } + if watch.MarkUnsafe != markUnsafeDefault { + t.Fatalf("Unexpected markUnsafe %v expected %v", watch.MarkUnsafe, markUnsafeDefault) + } + if watch.WatchClusterScopedResources != watchClusterScopedResourcesDefault { + t.Fatalf("Unexpected watchClusterScopedResources %v expected %v", + watch.WatchClusterScopedResources, watchClusterScopedResourcesDefault) + } + if watch.AnsibleVerbosity != ansibleVerbosityDefault { + t.Fatalf("Unexpected ansibleVerbosity %v expected %v", watch.AnsibleVerbosity, + ansibleVerbosityDefault) + } + + err := watch.Validate() + if err != nil && tc.shouldValidate { + t.Fatalf("Watch %v failed validation", watch) + } + if err == nil && !tc.shouldValidate { + t.Fatalf("Watch %v should have failed validation", watch) + } + }) + } +} + +func TestLoad(t *testing.T) { //nolint:gocyclo + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("Unable to get working director: %v", err) + } + + validTemplate := struct { + ValidPlaybook string + ValidRole string + }{ + ValidPlaybook: filepath.Join(cwd, "testdata", "playbook.yml"), + ValidRole: filepath.Join(cwd, "testdata", "roles", "role"), + } + + tmpl, err := template.ParseFiles("testdata/valid.yaml.tmpl") + if err != nil { + t.Fatalf("Unable to parse template: %v", err) + } + f, err := os.Create("testdata/valid.yaml") + if err != nil { + t.Fatalf("Unable to create valid.yaml: %v", err) + } + defer os.Remove("testdata/valid.yaml") + err = 
tmpl.Execute(f, validTemplate) + if err != nil { + t.Fatalf("Unable to create valid.yaml: %v", err) + return + } + + zeroSeconds := metav1.Duration{Duration: time.Duration(0)} + twoSeconds := metav1.Duration{Duration: time.Second * 2} + + validWatches := []Watch{ + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "NoFinalizer", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: true, + ReconcilePeriod: twoSeconds, + WatchDependentResources: true, + WatchClusterScopedResources: false, + SnakeCaseParameters: true, + MarkUnsafe: false, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "WithUnsafeMarked", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: true, + ReconcilePeriod: twoSeconds, + MarkUnsafe: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "Playbook", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: true, + WatchDependentResources: true, + SnakeCaseParameters: false, + WatchClusterScopedResources: false, + Finalizer: &Finalizer{ + Name: "app.example.com/finalizer", + Role: validTemplate.ValidRole, + Vars: map[string]interface{}{"sentinel": "finalizer_running"}, + }, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "WatchClusterScoped", + }, + Playbook: validTemplate.ValidPlaybook, + ReconcilePeriod: twoSeconds, + ManageStatus: true, + WatchDependentResources: true, + WatchClusterScopedResources: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "NoReconcile", + }, + Playbook: validTemplate.ValidPlaybook, + ReconcilePeriod: zeroSeconds, + ManageStatus: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: 
"DefaultStatus", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "DisableStatus", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: false, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "EnableStatus", + }, + Playbook: validTemplate.ValidPlaybook, + ManageStatus: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "Role", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + Finalizer: &Finalizer{ + Name: "app.example.com/finalizer", + Playbook: validTemplate.ValidPlaybook, + Vars: map[string]interface{}{"sentinel": "finalizer_running"}, + }, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "FinalizerRole", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + Finalizer: &Finalizer{ + Name: "app.example.com/finalizer", + Vars: map[string]interface{}{"sentinel": "finalizer_running"}, + }, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "MaxConcurrentReconcilesDefault", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + MaxConcurrentReconciles: 1, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "MaxConcurrentReconcilesIgnored", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + MaxConcurrentReconciles: 1, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "MaxConcurrentReconcilesEnv", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + MaxConcurrentReconciles: 4, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: 
"app.example.com", + Kind: "AnsibleVerbosityDefault", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + AnsibleVerbosity: 2, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "AnsibleVerbosityIgnored", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + AnsibleVerbosity: 2, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "AnsibleVerbosityEnv", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + AnsibleVerbosity: 4, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "WatchWithVars", + }, + Role: validTemplate.ValidRole, + ManageStatus: true, + Vars: map[string]interface{}{"sentinel": "reconciling"}, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "AnsibleCollectionEnvTest", + }, + Role: filepath.Join(cwd, "testdata", "ansible_collections", "nameSpace", "collection", "roles", "someRole"), + ManageStatus: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "AnsibleBlacklistTest", + }, + Role: validTemplate.ValidRole, + Blacklist: []schema.GroupVersionKind{ + { + Version: "v1alpha1.1", + Group: "app.example.com/1", + Kind: "AnsibleBlacklistTest_1", + }, + { + Version: "v1alpha1.2", + Group: "app.example.com/2", + Kind: "AnsibleBlacklistTest_2", + }, + { + Version: "v1alpha1.3", + Group: "app.example.com/3", + Kind: "AnsibleBlacklistTest_3", + }, + }, + ManageStatus: true, + }, + Watch{ + GroupVersionKind: schema.GroupVersionKind{ + Version: "v1alpha1", + Group: "app.example.com", + Kind: "AnsibleSelectorTest", + }, + Role: validTemplate.ValidRole, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "matchLabel_1": "matchLabel_1", + }, + MatchExpressions: 
[]metav1.LabelSelectorRequirement{ + { + Key: "matchexpression_key", + Operator: "matchexpression_operator", + Values: []string{"value1", "value2"}, + }, + }, + }, + ManageStatus: true, + }, + } + + testCases := []struct { + name string + path string + maxConcurrentReconciles int + ansibleVerbosity int + expected []Watch + shouldError bool + shouldSetAnsibleRolePathEnvVar bool + shouldSetAnsibleCollectionPathEnvVar bool + }{ + { + name: "error duplicate GVK", + path: "testdata/duplicate_gvk.yaml", + shouldError: true, + }, + { + name: "error no file", + path: "testdata/please_don't_create_me_gvk.yaml", + shouldError: true, + }, + { + name: "error invalid yaml", + path: "testdata/invalid.yaml", + shouldError: true, + }, + { + name: "error invalid playbook path", + path: "testdata/invalid_playbook_path.yaml", + shouldError: true, + }, + { + name: "error invalid playbook finalizer path", + path: "testdata/invalid_finalizer_playbook_path.yaml", + shouldError: true, + }, + { + name: "error invalid finalizer whithout name", + path: "testdata/invalid_finalizer_whithout_name.yaml", + shouldError: true, + }, + { + name: "error invalid role path", + path: "testdata/invalid_role_path.yaml", + shouldError: true, + }, + { + name: "error invalid yaml file", + path: "testdata/invalid_yaml_file.yaml", + shouldError: true, + }, + { + name: "error invalid role path", + path: "testdata/invalid_role_path.yaml", + shouldError: true, + }, + { + name: "error invalid role finalizer path", + path: "testdata/invalid_finalizer_role_path.yaml", + shouldError: true, + }, + { + name: "error invalid finalizer no path/role/vars", + path: "testdata/invalid_finalizer_no_vars.yaml", + shouldError: true, + }, + { + name: "error invalid duration", + path: "testdata/invalid_duration.yaml", + shouldError: true, + }, + { + name: "error invalid status", + path: "testdata/invalid_status.yaml", + shouldError: true, + }, + { + name: "if collection env var is not set and collection is not installed to the 
default locations, fail", + path: "testdata/invalid_collection.yaml", + shouldError: true, + }, + { + name: "valid watches file", + path: "testdata/valid.yaml", + maxConcurrentReconciles: 1, + ansibleVerbosity: 2, + shouldSetAnsibleCollectionPathEnvVar: true, + expected: validWatches, + }, + { + name: "should load file successfully with ANSIBLE ROLES PATH ENV VAR set", + path: "testdata/valid.yaml", + maxConcurrentReconciles: 1, + ansibleVerbosity: 2, + shouldSetAnsibleRolePathEnvVar: true, + shouldSetAnsibleCollectionPathEnvVar: true, + expected: validWatches, + }, + } + + os.Setenv("WORKER_MAXCONCURRENTRECONCILESENV_APP_EXAMPLE_COM", "4") + defer os.Unsetenv("WORKER_MAXCONCURRENTRECONCILESENV_APP_EXAMPLE_COM") + os.Setenv("ANSIBLE_VERBOSITY_ANSIBLEVERBOSITYENV_APP_EXAMPLE_COM", "4") + defer os.Unsetenv("ANSIBLE_VERBOSITY_ANSIBLEVERBOSITYENV_APP_EXAMPLE_COM") + + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + // Test Load with ANSIBLE_ROLES_PATH var + if tc.shouldSetAnsibleRolePathEnvVar { + anisbleEnvVar := "path/invalid:/path/invalid/myroles:" + wd + os.Setenv("ANSIBLE_ROLES_PATH", anisbleEnvVar) + defer os.Unsetenv("ANSIBLE_ROLES_PATH") + } + if tc.shouldSetAnsibleCollectionPathEnvVar { + + ansibleCollectionPathEnv := filepath.Join(wd, "testdata") + os.Setenv("ANSIBLE_COLLECTIONS_PATH", ansibleCollectionPathEnv) + defer os.Unsetenv("ANSIBLE_COLLECTIONS_PATH") + } + + watchSlice, err := Load(tc.path, tc.maxConcurrentReconciles, tc.ansibleVerbosity) + if err != nil && !tc.shouldError { + t.Fatalf("Error occurred unexpectedly: %v", err) + } + if err != nil && tc.shouldError { + return + } + // meant to protect from adding test to valid without corresponding check + if len(tc.expected) != len(watchSlice) { + t.Fatalf("Unexpected watches length: %v expected: %v", len(watchSlice), len(tc.expected)) + } + for idx, expectedWatch := range tc.expected { + gvk := 
expectedWatch.GroupVersionKind + gotWatch := watchSlice[idx] + if gotWatch.GroupVersionKind != gvk { + t.Fatalf("Unexpected GVK: \nunexpected GVK: %#v\nexpected GVK: %#v", + gotWatch.GroupVersionKind, gvk) + } + if gotWatch.Role != expectedWatch.Role { + t.Fatalf("The GVK: %v unexpected Role: %v expected Role: %v", gvk, gotWatch.Role, + expectedWatch.Role) + } + if gotWatch.Playbook != expectedWatch.Playbook { + t.Fatalf("The GVK: %v unexpected Playbook: %v expected Playbook: %v", gvk, gotWatch.Playbook, + expectedWatch.Playbook) + } + if gotWatch.ManageStatus != expectedWatch.ManageStatus { + t.Fatalf("The GVK: %v\nunexpected manageStatus:%#v\nexpected manageStatus: %#v", gvk, + gotWatch.ManageStatus, expectedWatch.ManageStatus) + } + if gotWatch.Finalizer != expectedWatch.Finalizer { + if gotWatch.Finalizer.Name != expectedWatch.Finalizer.Name || gotWatch.Finalizer.Playbook != + expectedWatch.Finalizer.Playbook || gotWatch.Finalizer.Role != + expectedWatch.Finalizer.Role || reflect.DeepEqual(gotWatch.Finalizer.Vars["sentinel"], + expectedWatch.Finalizer.Vars["sentininel"]) { + t.Fatalf("The GVK: %v\nunexpected finalizer: %#v\nexpected finalizer: %#v", gvk, + gotWatch.Finalizer, expectedWatch.Finalizer) + } + } + if gotWatch.ReconcilePeriod != expectedWatch.ReconcilePeriod { + t.Fatalf("The GVK: %v unexpected reconcile period: %v expected reconcile period: %v", gvk, + gotWatch.ReconcilePeriod, expectedWatch.ReconcilePeriod) + } + if gotWatch.MarkUnsafe != expectedWatch.MarkUnsafe { + t.Fatalf("The GVK: %v unexpected mark unsafe: %v expected mark unsafe: %v", gvk, + gotWatch.MarkUnsafe, expectedWatch.MarkUnsafe) + } + + for i, val := range expectedWatch.Blacklist { + if val != gotWatch.Blacklist[i] { + t.Fatalf("Incorrect blacklist GVK %s: got %s, expected %s", gvk, + val, gotWatch.Blacklist[i]) + } + } + + if !reflect.DeepEqual(gotWatch.Selector, expectedWatch.Selector) { + t.Fatalf("Incorrect selector GVK %s:\n\tgot %s\n\texpected %s", gvk, + gotWatch.Selector, 
expectedWatch.Selector) + } + + if expectedWatch.MaxConcurrentReconciles == 0 { + if gotWatch.MaxConcurrentReconciles != tc.maxConcurrentReconciles { + t.Fatalf("Unexpected max workers: %v expected workers: %v", gotWatch.MaxConcurrentReconciles, + tc.maxConcurrentReconciles) + } + } else { + if gotWatch.MaxConcurrentReconciles != expectedWatch.MaxConcurrentReconciles { + t.Fatalf("Unexpected max workers: %v expected workers: %v", gotWatch.MaxConcurrentReconciles, + expectedWatch.MaxConcurrentReconciles) + } + } + } + }) + } +} + +func TestMaxConcurrentReconciles(t *testing.T) { + testCases := []struct { + name string + gvk schema.GroupVersionKind + defValue int + expectedValue int + setEnv bool + envVarMap map[string]int + }{ + { + name: "no env, use default value", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 1, + setEnv: false, + envVarMap: map[string]int{ + "WORKER_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 0, + }, + }, + { + name: "invalid env, use default value", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 1, + setEnv: true, + envVarMap: map[string]int{ + "WORKER_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 0, + }, + }, + { + name: "worker_%s_%s env set to 3, expect 3", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 3, + setEnv: true, + envVarMap: map[string]int{ + "WORKER_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 3, + }, + }, + { + name: "max_concurrent_reconciler_%s_%s set to 2, expect 2", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 2, + setEnv: true, + envVarMap: map[string]int{ + "MAX_CONCURRENT_RECONCILES_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 2, + }, + }, + { + name: 
"set multiple env variables", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 3, + setEnv: true, + envVarMap: map[string]int{ + "MAX_CONCURRENT_RECONCILES_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 3, + "WORKER_MEMCACHESERVICE_CACHE_EXAMPLE_COM": 1, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for key, val := range tc.envVarMap { + os.Unsetenv(key) + if tc.setEnv { + os.Setenv(key, strconv.Itoa(val)) + } + } + workers := getMaxConcurrentReconciles(tc.gvk, tc.defValue) + if tc.expectedValue != workers { + t.Fatalf("Unexpected MaxConcurrentReconciles: %v expected MaxConcurrentReconciles: %v", + workers, tc.expectedValue) + } + }) + } +} + +func TestAnsibleVerbosity(t *testing.T) { + testCases := []struct { + name string + gvk schema.GroupVersionKind + defValue int + expectedValue int + setEnv bool + envKey string + envValue int + }{ + { + name: "no env, use default value", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 1, + setEnv: false, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + }, + { + name: "invalid env, lt 0, use default value", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 1, + setEnv: true, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + envValue: -1, + }, + { + name: "invalid env, gt 7, use default value", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 1, + setEnv: true, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + envValue: 8, + }, + { + name: "env set to 3, expect 3", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + 
Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 3, + setEnv: true, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + envValue: 3, + }, + { + name: "boundary test 0", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 0, + setEnv: true, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + envValue: 0, + }, + { + name: "boundary test 7", + gvk: schema.GroupVersionKind{ + Group: "cache.example.com", + Version: "v1alpha1", + Kind: "MemCacheService", + }, + defValue: 1, + expectedValue: 7, + setEnv: true, + envKey: "ANSIBLE_VERBOSITY_MEMCACHESERVICE_CACHE_EXAMPLE_COM", + envValue: 7, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + os.Unsetenv(tc.envKey) + if tc.setEnv { + os.Setenv(tc.envKey, strconv.Itoa(tc.envValue)) + } + verbosity := getAnsibleVerbosity(tc.gvk, tc.defValue) + if tc.expectedValue != verbosity { + t.Fatalf("Unexpected Verbosity: %v expected Verbosity: %v", verbosity, tc.expectedValue) + } + }) + } +} + +// Test the func getPossibleRolePaths. 
+func TestGetPossibleRolePaths(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + // Mock default Full Path based in the current directory + rolesPath := filepath.Join(wd, "roles") + home, err := os.UserHomeDir() + if err != nil { + t.Fatal(err) + } + + type args struct { + path string + rolesEnv string + collectionsEnv string + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "check the current dir for a role name", + args: args{ + path: "Foo", + }, + want: []string{filepath.Join(rolesPath, "Foo")}, + }, + { + name: "check the current dir for a relative path", + args: args{ + path: "relative/Foo", + }, + want: []string{filepath.Join(rolesPath, "relative/Foo")}, + }, + { + name: "check all paths in ANSIBLE_ROLES_PATH env var", + args: args{ + rolesEnv: "relative:nested/relative:/and/abs", + path: "Foo", + }, + want: []string{ + filepath.Join(rolesPath, "Foo"), + filepath.Join("relative", "Foo"), + filepath.Join("relative", "roles", "Foo"), + filepath.Join("nested/relative", "Foo"), + filepath.Join("nested/relative", "roles", "Foo"), + filepath.Join("/and/abs", "Foo"), + filepath.Join("/and/abs", "roles", "Foo"), + }, + }, + { + name: "Check for roles inside default collection locations when given fqcn", + args: args{ + path: "myNS.myCol.myRole", + }, + want: []string{ + filepath.Join(rolesPath, "myNS.myCol.myRole"), + filepath.Join("/usr/share/ansible/collections", "ansible_collections", "myNS", "myCol", "roles", "myRole"), + filepath.Join(home, ".ansible/collections", "ansible_collections", "myNS", "myCol", "roles", "myRole"), + }, + }, + { + name: "Check for roles inside ANSIBLE_COLLECTIONS_PATH locations when set and given path is fqcn", + args: args{ + path: "myNS.myCol.myRole", + collectionsEnv: "/my/collections/", + }, + want: []string{ + filepath.Join(rolesPath, "myNS.myCol.myRole"), + filepath.Join("/my/collections/", "ansible_collections", "myNS", "myCol", "roles", "myRole"), + // Note: Defaults 
are not checked when the env variable is set + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + if len(tt.args.rolesEnv) > 0 { + os.Setenv("ANSIBLE_ROLES_PATH", tt.args.rolesEnv) + defer os.Unsetenv("ANSIBLE_ROLES_PATH") + } + if len(tt.args.collectionsEnv) > 0 { + os.Setenv("ANSIBLE_COLLECTIONS_PATH", tt.args.collectionsEnv) + defer os.Unsetenv("ANSIBLE_COLLECTIONS_PATH") + } + + allPathsToCheck := getPossibleRolePaths(wd, tt.args.path) + sort.Strings(tt.want) + sort.Strings(allPathsToCheck) + if !reflect.DeepEqual(allPathsToCheck, tt.want) { + t.Errorf("Unexpected paths returned") + fmt.Println("Returned:") + for i, path := range allPathsToCheck { + fmt.Println(i, path) + } + fmt.Println("Wanted:") + for i, path := range tt.want { + fmt.Println(i, path) + } + } + }) + } +} diff --git a/internal/cmd/ansible-operator/run/cmd.go b/internal/cmd/ansible-operator/run/cmd.go new file mode 100644 index 0000000..fb0f172 --- /dev/null +++ b/internal/cmd/ansible-operator/run/cmd.go @@ -0,0 +1,365 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package run + +import ( + "errors" + "flag" + "fmt" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" + logf "sigs.k8s.io/controller-runtime/pkg/log" + zapf "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/apiserver" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/controller" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/events" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/flags" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/metrics" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/proxy/controllermap" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/runner" + "github.com/operator-framework/ansible-operator-plugins/internal/ansible/watches" + "github.com/operator-framework/ansible-operator-plugins/internal/util/k8sutil" + sdkVersion "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +var log = logf.Log.WithName("cmd") + +func printVersion() { + version := sdkVersion.GitVersion + if version == "unknown" { + version = sdkVersion.Version + } + log.Info("Version", + "Go Version", runtime.Version(), + "GOOS", runtime.GOOS, + "GOARCH", runtime.GOARCH, + "ansible-operator", version, + "commit", sdkVersion.GitCommit) +} + +func NewCmd() 
*cobra.Command { + f := &flags.Flags{} + zapfs := flag.NewFlagSet("zap", flag.ExitOnError) + opts := &zapf.Options{} + opts.BindFlags(zapfs) + + cmd := &cobra.Command{ + Use: "run", + Short: "Run the operator", + Run: func(cmd *cobra.Command, _ []string) { + logf.SetLogger(zapf.New(zapf.UseFlagOptions(opts))) + run(cmd, f) + }, + } + + f.AddTo(cmd.Flags()) + cmd.Flags().AddGoFlagSet(zapfs) + return cmd +} + +func run(cmd *cobra.Command, f *flags.Flags) { + printVersion() + metrics.RegisterBuildInfo(crmetrics.Registry) + + // Load config options from the config at f.ManagerConfigPath. + // These options will not override those set by flags. + var ( + options manager.Options + err error + ) + if f.ManagerConfigPath != "" { + cfgLoader := ctrl.ConfigFile().AtPath(f.ManagerConfigPath) + if options, err = options.AndFrom(cfgLoader); err != nil { + log.Error(err, "Unable to load the manager config file") + os.Exit(1) + } + } + exitIfUnsupported(options) + + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "Failed to get config.") + os.Exit(1) + } + + // TODO(2.0.0): remove + // Deprecated: OPERATOR_NAME environment variable is an artifact of the + // legacy operator-sdk project scaffolding. Flag `--leader-election-id` + // should be used instead. + if operatorName, found := os.LookupEnv("OPERATOR_NAME"); found { + log.Info("Environment variable OPERATOR_NAME has been deprecated, use --leader-election-id instead.") + if cmd.Flags().Changed("leader-election-id") { + log.Info("Ignoring OPERATOR_NAME environment variable since --leader-election-id is set") + } else if options.LeaderElectionID == "" { + // Only set leader election ID using OPERATOR_NAME if unset everywhere else, + // since this env var is deprecated. + options.LeaderElectionID = operatorName + } + } + + //TODO(2.0.0): remove the following checks. 
they are required just because of the flags deprecation + if cmd.Flags().Changed("leader-elect") && cmd.Flags().Changed("enable-leader-election") { + log.Error(errors.New("only one of --leader-elect and --enable-leader-election may be set"), "invalid flags usage") + os.Exit(1) + } + + if cmd.Flags().Changed("metrics-addr") && cmd.Flags().Changed("metrics-bind-address") { + log.Error(errors.New("only one of --metrics-addr and --metrics-bind-address may be set"), "invalid flags usage") + os.Exit(1) + } + + // Set default manager options + // TODO: probably should expose the host & port as an environment variables + options = f.ToManagerOptions(options) + if options.NewClient == nil { + options.NewClient = func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + // Create the Client for Write operations. + c, err := client.New(config, options) + if err != nil { + return nil, err + } + return client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cache, + Client: c, + UncachedObjects: uncachedObjects, + CacheUnstructured: true, + }) + } + } + + namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) + log = log.WithValues("Namespace", namespace) + if found { + log.V(1).Info(fmt.Sprintf("Setting namespace with value in %s", k8sutil.WatchNamespaceEnvVar)) + if namespace == metav1.NamespaceAll { + log.Info("Watching all namespaces.") + options.Namespace = metav1.NamespaceAll + } else { + if strings.Contains(namespace, ",") { + log.Info("Watching multiple namespaces.") + options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) + } else { + log.Info("Watching single namespace.") + options.Namespace = namespace + } + } + } else if options.Namespace == "" { + log.Info(fmt.Sprintf("Watch namespaces not configured by environment variable %s or file. 
"+ + "Watching all namespaces.", k8sutil.WatchNamespaceEnvVar)) + options.Namespace = metav1.NamespaceAll + } + + err = setAnsibleEnvVars(f) + if err != nil { + log.Error(err, "Failed to set environment variable.") + os.Exit(1) + } + + // Create a new manager to provide shared dependencies and start components + mgr, err := manager.New(cfg, options) + if err != nil { + log.Error(err, "Failed to create a new manager.") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + log.Error(err, "Unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + log.Error(err, "Unable to set up ready check") + os.Exit(1) + } + + cMap := controllermap.NewControllerMap() + watches, err := watches.Load(f.WatchesFile, f.MaxConcurrentReconciles, f.AnsibleVerbosity) + if err != nil { + log.Error(err, "Failed to load watches.") + os.Exit(1) + } + for _, w := range watches { + reconcilePeriod := f.ReconcilePeriod + if w.ReconcilePeriod.Duration != time.Duration(0) { + // if a duration other than default was passed in through watches, + // it will take precedence over the command-line flag + reconcilePeriod = w.ReconcilePeriod.Duration + } + + runner, err := runner.New(w, f.AnsibleArgs) + if err != nil { + log.Error(err, "Failed to create runner") + os.Exit(1) + } + + ctr := controller.Add(mgr, controller.Options{ + GVK: w.GroupVersionKind, + Runner: runner, + ManageStatus: w.ManageStatus, + AnsibleDebugLogs: getAnsibleDebugLog(), + MaxConcurrentReconciles: w.MaxConcurrentReconciles, + ReconcilePeriod: reconcilePeriod, + Selector: w.Selector, + LoggingLevel: getAnsibleEventsToLog(f), + WatchAnnotationsChanges: w.WatchAnnotationsChanges, + }) + if ctr == nil { + log.Error(fmt.Errorf("failed to add controller for GVK %v", w.GroupVersionKind.String()), "") + os.Exit(1) + } + + cMap.Store(w.GroupVersionKind, &controllermap.Contents{Controller: *ctr, //nolint:staticcheck + WatchDependentResources: 
w.WatchDependentResources, + WatchClusterScopedResources: w.WatchClusterScopedResources, + OwnerWatchMap: controllermap.NewWatchMap(), + AnnotationWatchMap: controllermap.NewWatchMap(), + }, w.Blacklist) + } + + // TODO(2.0.0): remove + err = mgr.AddHealthzCheck("ping", healthz.Ping) + if err != nil { + log.Error(err, "Failed to add Healthz check.") + } + + done := make(chan error) + + // start the proxy + err = proxy.Run(done, proxy.Options{ + Address: "localhost", + Port: f.ProxyPort, + KubeConfig: mgr.GetConfig(), + Cache: mgr.GetCache(), + RESTMapper: mgr.GetRESTMapper(), + ControllerMap: cMap, + OwnerInjection: f.InjectOwnerRef, + WatchedNamespaces: strings.Split(namespace, ","), + }) + if err != nil { + log.Error(err, "Error starting proxy.") + os.Exit(1) + } + // start the ansible-operator api server + go func() { + err = apiserver.Run(apiserver.Options{ + Address: "localhost", + Port: 5050, + }) + done <- err + }() + + // start the operator + go func() { + done <- mgr.Start(signals.SetupSignalHandler()) + }() + + // wait for either to finish + err = <-done + if err != nil { + log.Error(err, "Proxy or operator exited with error.") + os.Exit(1) + } + log.Info("Exiting.") +} + +// exitIfUnsupported prints an error containing unsupported field names and exits +// if any of those fields are not their default values. +func exitIfUnsupported(options manager.Options) { + var keys []string + // The below options are webhook-specific, which is not supported by ansible. 
+ if options.CertDir != "" { + keys = append(keys, "certDir") + } + if options.Host != "" { + keys = append(keys, "host") + } + if options.Port != 0 { + keys = append(keys, "port") + } + + if len(keys) > 0 { + log.Error(fmt.Errorf("%s set in manager options", strings.Join(keys, ", ")), "unsupported fields") + os.Exit(1) + } +} + +// getAnsibleDebugLog returns the value from the ANSIBLE_DEBUG_LOGS environment variable in order to +// print the full Ansible logs +func getAnsibleDebugLog() bool { + const envVar = "ANSIBLE_DEBUG_LOGS" + val := false + if envVal, ok := os.LookupEnv(envVar); ok { + if i, err := strconv.ParseBool(envVal); err != nil { + log.Info("Could not parse environment variable as an boolean; using default value", + "envVar", envVar, "default", val) + } else { + val = i + } + } else if !ok { + log.Info("Environment variable not set; using default value", "envVar", envVar, + envVar, val) + } + return val +} + +// getAnsibleEventsToLog returns the integer value of the log level set in the flag +func getAnsibleEventsToLog(f *flags.Flags) events.LogLevel { + if strings.ToLower(f.AnsibleLogEvents) == "everything" { + return events.Everything + } else if strings.ToLower(f.AnsibleLogEvents) == "nothing" { + return events.Nothing + } else { + if strings.ToLower(f.AnsibleLogEvents) != "tasks" && f.AnsibleLogEvents != "" { + log.Error(fmt.Errorf("--ansible-log-events flag value '%s' not recognized. 
Must be one of: Tasks, Everything, Nothing", f.AnsibleLogEvents), "unrecognized log level") + } + return events.Tasks // Tasks is the default + } +} + +// setAnsibleEnvVars will set environment variables based on CLI flags +func setAnsibleEnvVars(f *flags.Flags) error { + if len(f.AnsibleRolesPath) > 0 { + if err := os.Setenv(flags.AnsibleRolesPathEnvVar, f.AnsibleRolesPath); err != nil { + return fmt.Errorf("failed to set environment variable %s: %v", flags.AnsibleRolesPathEnvVar, err) + } + log.Info("Set the environment variable", "envVar", flags.AnsibleRolesPathEnvVar, + "value", f.AnsibleRolesPath) + } + + if len(f.AnsibleCollectionsPath) > 0 { + if err := os.Setenv(flags.AnsibleCollectionsPathEnvVar, f.AnsibleCollectionsPath); err != nil { + return fmt.Errorf("failed to set environment variable %s: %v", flags.AnsibleCollectionsPathEnvVar, err) + } + log.Info("Set the environment variable", "envVar", flags.AnsibleCollectionsPathEnvVar, + "value", f.AnsibleCollectionsPath) + } + return nil +} diff --git a/internal/cmd/ansible-operator/run/proxy_suite_test.go b/internal/cmd/ansible-operator/run/proxy_suite_test.go new file mode 100644 index 0000000..53094ef --- /dev/null +++ b/internal/cmd/ansible-operator/run/proxy_suite_test.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package run + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestVersion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "cmd suite") +} diff --git a/internal/cmd/ansible-operator/version/cmd.go b/internal/cmd/ansible-operator/version/cmd.go new file mode 100644 index 0000000..60301f8 --- /dev/null +++ b/internal/cmd/ansible-operator/version/cmd.go @@ -0,0 +1,44 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + + ver "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +func NewCmd() *cobra.Command { + versionCmd := &cobra.Command{ + Use: "version", + Short: "Prints the version of operator-sdk", + Run: func(cmd *cobra.Command, args []string) { + run() + }, + } + return versionCmd +} + +func run() { + version := ver.GitVersion + if version == "unknown" { + version = ver.Version + } + fmt.Printf("ansible-operator version: %q, commit: %q, kubernetes version: %q, go version: %q, GOOS: %q, GOARCH: %q\n", + version, ver.GitCommit, ver.KubernetesVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) +} diff --git a/internal/cmd/ansible-operator/version/cmd_test.go b/internal/cmd/ansible-operator/version/cmd_test.go new file mode 100644 index 0000000..5d3b803 --- /dev/null +++ b/internal/cmd/ansible-operator/version/cmd_test.go @@ -0,0 +1,65 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "fmt" + "io" + "os" + "runtime" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + ver "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +var _ = Describe("Running a version command", func() { + Describe("NewCmd", func() { + It("builds a cobra command", func() { + cmd := NewCmd() + Expect(cmd).NotTo(BeNil()) + Expect(cmd.Use).NotTo(Equal("")) + Expect(cmd.Short).NotTo(Equal("")) + }) + }) + Describe("run", func() { + It("prints the correct version info", func() { + r, w, _ := os.Pipe() + tmp := os.Stdout + defer func() { + os.Stdout = tmp + }() + os.Stdout = w + go func() { + run() + w.Close() + }() + stdout, err := io.ReadAll(r) + Expect(err).ToNot(HaveOccurred()) + stdoutString := string(stdout) + version := ver.GitVersion + if version == "unknown" { + version = ver.Version + } + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("version: %q", version))) + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("commit: %q", ver.GitCommit))) + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("kubernetes version: %q", ver.KubernetesVersion))) + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("go version: %q", runtime.Version()))) + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("GOOS: %q", runtime.GOOS))) + Expect(stdoutString).To(ContainSubstring(fmt.Sprintf("GOARCH: %q", runtime.GOARCH))) + }) + }) +}) diff --git 
a/internal/cmd/ansible-operator/version/version_suite_test.go b/internal/cmd/ansible-operator/version/version_suite_test.go new file mode 100644 index 0000000..37c17fd --- /dev/null +++ b/internal/cmd/ansible-operator/version/version_suite_test.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestVersion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Version Cmd Suite") +} diff --git a/internal/flags/flags.go b/internal/flags/flags.go new file mode 100644 index 0000000..cc374cf --- /dev/null +++ b/internal/flags/flags.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flags + +// global command-line flags +const ( + VerboseOpt = "verbose" +) diff --git a/internal/plugins/ansible/v1/api.go b/internal/plugins/ansible/v1/api.go new file mode 100644 index 0000000..dc70295 --- /dev/null +++ b/internal/plugins/ansible/v1/api.go @@ -0,0 +1,167 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ansible + +import ( + "errors" + "fmt" + + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "sigs.k8s.io/kubebuilder/v3/pkg/config" + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + "sigs.k8s.io/kubebuilder/v3/pkg/model/resource" + "sigs.k8s.io/kubebuilder/v3/pkg/plugin" + pluginutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/util" +) + +const ( + crdVersionFlag = "crd-version" + generatePlaybookFlag = "generate-playbook" + generateRoleFlag = "generate-role" + + defaultCrdVersion = "v1" + legacyCrdVersion = "v1beta1" +) + +type createAPIOptions struct { + CRDVersion string + DoRole, DoPlaybook bool +} + +func (opts createAPIOptions) UpdateResource(res *resource.Resource) { + res.API = &resource.API{ + CRDVersion: opts.CRDVersion, + Namespaced: true, + } + + // Ensure that Path is empty and Controller false as this is not a Go project + res.Path = "" + res.Controller = false +} + +var _ 
plugin.CreateAPISubcommand = &createAPISubcommand{} + +type createAPISubcommand struct { + config config.Config + resource *resource.Resource + options createAPIOptions +} + +func (p *createAPISubcommand) UpdateMetadata(cliMeta plugin.CLIMetadata, subcmdMeta *plugin.SubcommandMetadata) { + subcmdMeta.Description = `Scaffold a Kubernetes API in which the controller is an Ansible role or playbook. + + - generates a Custom Resource Definition and sample + - Updates watches.yaml + - optionally generates Ansible Role tree + - optionally generates Ansible playbook + + For the scaffolded operator to be runnable with no changes, specify either --generate-role or --generate-playbook. + +` + subcmdMeta.Examples = fmt.Sprintf(`# Create a new API, without Ansible roles or playbooks + $ %[1]s create api \ + --group=apps --version=v1alpha1 \ + --kind=AppService + + $ %[1]s create api \ + --group=apps --version=v1alpha1 \ + --kind=AppService \ + --generate-role + + $ %[1]s create api \ + --group=apps --version=v1alpha1 \ + --kind=AppService \ + --generate-playbook + + $ %[1]s create api \ + --group=apps --version=v1alpha1 \ + --kind=AppService + --generate-playbook + --generate-role +`, cliMeta.CommandName) +} + +func (p *createAPISubcommand) BindFlags(fs *pflag.FlagSet) { + fs.SortFlags = false + fs.StringVar(&p.options.CRDVersion, crdVersionFlag, defaultCrdVersion, "crd version to generate") + // (not required raise an error in this case) + // nolint:errcheck,gosec + fs.MarkDeprecated(crdVersionFlag, util.WarnMessageRemovalV1beta1) + + fs.BoolVar(&p.options.DoRole, generateRoleFlag, false, "Generate an Ansible role skeleton.") + fs.BoolVar(&p.options.DoPlaybook, generatePlaybookFlag, false, "Generate an Ansible playbook. 
If passed with --generate-role, the playbook will invoke the role.") +} + +func (p *createAPISubcommand) InjectConfig(c config.Config) error { + p.config = c + + return nil +} + +func (p *createAPISubcommand) PreScaffold(machinery.Filesystem) error { + if p.options.CRDVersion == legacyCrdVersion { + logrus.Warn(util.WarnMessageRemovalV1beta1) + } + return nil +} + +func (p *createAPISubcommand) InjectResource(res *resource.Resource) error { + p.resource = res + + p.options.UpdateResource(p.resource) + + if err := p.resource.Validate(); err != nil { + return err + } + + // Check that resource doesn't have the API scaffolded + if res, err := p.config.GetResource(p.resource.GVK); err == nil && res.HasAPI() { + return errors.New("the API resource already exists") + } + + // Check that the provided group can be added to the project + if !p.config.IsMultiGroup() && p.config.ResourcesLength() != 0 && !p.config.HasGroup(p.resource.Group) { + return fmt.Errorf("multiple groups are not allowed by default, to enable multi-group set 'multigroup: true' in your PROJECT file") + } + + // Selected CRD version must match existing CRD versions. 
+ // nolint:staticcheck + if pluginutil.HasDifferentCRDVersion(p.config, p.resource.API.CRDVersion) { + return fmt.Errorf("only one CRD version can be used for all resources, cannot add %q", p.resource.API.CRDVersion) + } + + return nil +} + +func (p *createAPISubcommand) Scaffold(fs machinery.Filesystem) error { + if err := util.RemoveKustomizeCRDManifests(); err != nil { + return fmt.Errorf("error removing kustomization CRD manifests: %v", err) + } + if err := util.UpdateKustomizationsCreateAPI(); err != nil { + return fmt.Errorf("error updating kustomization.yaml files: %v", err) + } + + scaffolder := scaffolds.NewCreateAPIScaffolder(p.config, *p.resource, p.options.DoRole, p.options.DoPlaybook) + scaffolder.InjectFS(fs) + if err := scaffolder.Scaffold(); err != nil { + return err + } + + return nil +} diff --git a/internal/plugins/ansible/v1/constants/constants.go b/internal/plugins/ansible/v1/constants/constants.go new file mode 100644 index 0000000..91aa6a6 --- /dev/null +++ b/internal/plugins/ansible/v1/constants/constants.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package constants + +import ( + "path/filepath" +) + +const ( + filePathSep = string(filepath.Separator) + RolesDir = "roles" + PlaybooksDir = "playbooks" + MoleculeDir = "molecule" + MoleculeDefaultDir = MoleculeDir + filePathSep + "default" + MoleculeTestLocalDir = MoleculeDir + filePathSep + "test-local" + MoleculeClusterDir = MoleculeDir + filePathSep + "cluster" + MoleculeTemplatesDir = MoleculeDir + filePathSep + "templates" +) diff --git a/internal/plugins/ansible/v1/init.go b/internal/plugins/ansible/v1/init.go new file mode 100644 index 0000000..c370511 --- /dev/null +++ b/internal/plugins/ansible/v1/init.go @@ -0,0 +1,267 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ansible + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/pflag" + "sigs.k8s.io/kubebuilder/v3/pkg/config" + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + "sigs.k8s.io/kubebuilder/v3/pkg/plugin" + "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds" + sdkpluginutil "github.com/operator-framework/ansible-operator-plugins/internal/plugins/util" +) + +const ( + groupFlag = "group" + versionFlag = "version" + kindFlag = "kind" +) + +var _ plugin.InitSubcommand = &initSubcommand{} + +type initSubcommand struct { + // Wrapped plugin that we will call at post-scaffold + apiSubcommand createAPISubcommand + + config config.Config + + // For help text. + commandName string + + // Flags + group string + version string + kind string +} + +// UpdateContext injects documentation for the command +func (p *initSubcommand) UpdateMetadata(cliMeta plugin.CLIMetadata, subcmdMeta *plugin.SubcommandMetadata) { + subcmdMeta.Description = ` +Initialize a new Ansible-based operator project. 
+ +Writes the following files +- a kubebuilder PROJECT file with the domain and project layout configuration +- a Makefile that provides an interface for building and managing the operator +- Kubernetes manifests and kustomize configuration +- a watches.yaml file that defines the mapping between APIs and Roles/Playbooks + +Optionally creates a new API, using the same flags as "create api" +` + subcmdMeta.Examples = fmt.Sprintf(` + # Scaffold a project with no API + $ %[1]s init --plugins=%[2]s --domain=my.domain \ + + # Invokes "create api" + $ %[1]s init --plugins=%[2]s \ + --domain=my.domain \ + --group=apps --version=v1alpha1 --kind=AppService + + $ %[1]s init --plugins=%[2]s \ + --domain=my.domain \ + --group=apps --version=v1alpha1 --kind=AppService \ + --generate-role + + $ %[1]s init --plugins=%[2]s \ + --domain=my.domain \ + --group=apps --version=v1alpha1 --kind=AppService \ + --generate-playbook + + $ %[1]s init --plugins=%[2]s \ + --domain=my.domain \ + --group=apps --version=v1alpha1 --kind=AppService \ + --generate-playbook \ + --generate-role +`, cliMeta.CommandName, pluginKey) + + p.commandName = cliMeta.CommandName +} + +func (p *initSubcommand) BindFlags(fs *pflag.FlagSet) { + fs.SortFlags = false + fs.StringVar(&p.group, "group", "", "resource Group") + fs.StringVar(&p.version, "version", "", "resource Version") + fs.StringVar(&p.kind, "kind", "", "resource Kind") + p.apiSubcommand.BindFlags(fs) +} + +func (p *initSubcommand) InjectConfig(c config.Config) error { + p.config = c + return nil +} + +func (p *initSubcommand) Scaffold(fs machinery.Filesystem) error { + + if err := addInitCustomizations(p.config.GetProjectName(), p.config.IsComponentConfig()); err != nil { + return fmt.Errorf("error updating init manifests: %s", err) + } + + scaffolder := scaffolds.NewInitScaffolder(p.config) + scaffolder.InjectFS(fs) + return scaffolder.Scaffold() +} + +func (p *initSubcommand) PostScaffold() error { + doAPI := p.group != "" || p.version != "" || 
p.kind != "" + if !doAPI { + fmt.Printf("Next: define a resource with:\n$ %s create api\n", p.commandName) + } else { + args := []string{"create", "api"} + // The following three checks should match the default values in sig.k8s.io/kubebuilder/v3/pkg/cli/resource.go + if p.group != "" { + args = append(args, fmt.Sprintf("--%s", groupFlag), p.group) + } + if p.version != "" { + args = append(args, fmt.Sprintf("--%s", versionFlag), p.version) + } + if p.kind != "" { + args = append(args, fmt.Sprintf("--%s", kindFlag), p.kind) + } + if p.apiSubcommand.options.CRDVersion != defaultCrdVersion { + args = append(args, fmt.Sprintf("--%s", crdVersionFlag), p.apiSubcommand.options.CRDVersion) + } + if p.apiSubcommand.options.DoPlaybook { + args = append(args, fmt.Sprintf("--%s", generatePlaybookFlag)) + } + if p.apiSubcommand.options.DoRole { + args = append(args, fmt.Sprintf("--%s", generateRoleFlag)) + } + if err := util.RunCmd("Creating the API", os.Args[0], args...); err != nil { + return err + } + } + + return nil +} + +// addInitCustomizations will perform the required customizations for this plugin on the common base +func addInitCustomizations(projectName string, componentConfig bool) error { + managerFile := filepath.Join("config", "manager", "manager.yaml") + managerProxyPatchFile := filepath.Join("config", "default", "manager_auth_proxy_patch.yaml") + + // todo: we ought to use afero instead. 
Replace this methods to insert/update + // by https://github.com/kubernetes-sigs/kubebuilder/pull/2119 + + // Add leader election + if componentConfig { + err := util.InsertCode(managerFile, + "- /manager", + fmt.Sprintf("\n args:\n - --leader-election-id=%s", projectName)) + if err != nil { + return err + } + + err = util.InsertCode(managerProxyPatchFile, + "memory: 64Mi", + fmt.Sprintf("\n - name: manager\n args:\n - \"--leader-election-id=%s\"", projectName)) + if err != nil { + return err + } + } else { + err := util.InsertCode(managerFile, + "--leader-elect", + fmt.Sprintf("\n - --leader-election-id=%s", projectName)) + if err != nil { + return err + } + err = util.InsertCode(managerProxyPatchFile, + "- \"--leader-elect\"", + fmt.Sprintf("\n - \"--leader-election-id=%s\"", projectName)) + if err != nil { + return err + } + } + + // update default resource request and limits with bigger values + const resourcesLimitsFragment = ` resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + ` + + const resourcesLimitsAnsibleFragment = ` resources: + limits: + cpu: 500m + memory: 768Mi + requests: + cpu: 10m + memory: 256Mi + ` + + err := util.ReplaceInFile(managerFile, resourcesLimitsFragment, resourcesLimitsAnsibleFragment) + if err != nil { + return err + } + + // Add ANSIBLE_GATHERING env var + const envVar = ` + env: + - name: ANSIBLE_GATHERING + value: explicit` + err = util.InsertCode(managerFile, "name: manager", envVar) + if err != nil { + return err + } + + // replace the default ports because ansible has been using another one + // todo: remove it when we be able to change the port for the default one + // issue: https://github.com/operator-framework/ansible-operator-plugins/issues/4331 + err = util.ReplaceInFile(managerFile, "port: 8081", "port: 6789") + if err != nil { + return err + } + + if componentConfig { + managerConfigFile := filepath.Join("config", "manager", "controller_manager_config.yaml") + err = 
util.ReplaceInFile(managerConfigFile, "8081", "6789") + if err != nil { + return err + } + // Remove the webhook option for the componentConfig since webhooks are not supported by ansible + err = util.ReplaceInFile(managerConfigFile, "webhook:\n port: 9443", "") + if err != nil { + return err + } + } else { + err = util.ReplaceInFile(managerProxyPatchFile, "8081", "6789") + if err != nil { + return err + } + } + + // Remove the call to the command as manager. Helm/Ansible has not been exposing this entrypoint + // todo: provide the manager entrypoint for helm/ansible and then remove it + const command = `command: + - /manager + ` + err = util.ReplaceInFile(managerFile, command, "") + if err != nil { + return err + } + + if err := sdkpluginutil.UpdateKustomizationsInit(); err != nil { + return fmt.Errorf("error updating kustomization.yaml files: %v", err) + } + + return nil +} diff --git a/internal/plugins/ansible/v1/plugin.go b/internal/plugins/ansible/v1/plugin.go new file mode 100644 index 0000000..8383c37 --- /dev/null +++ b/internal/plugins/ansible/v1/plugin.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ansible + +import ( + "sigs.k8s.io/kubebuilder/v3/pkg/config" + cfgv3 "sigs.k8s.io/kubebuilder/v3/pkg/config/v3" + "sigs.k8s.io/kubebuilder/v3/pkg/plugin" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins" +) + +const pluginName = "base.ansible" + plugins.DefaultNameQualifier + +var ( + pluginVersion = plugin.Version{Number: 1} + supportedProjectVersions = []config.Version{cfgv3.Version} + pluginKey = plugin.KeyFor(Plugin{}) +) + +var ( + _ plugin.Plugin = Plugin{} + _ plugin.Init = Plugin{} + _ plugin.CreateAPI = Plugin{} +) + +type Plugin struct { + initSubcommand + createAPISubcommand +} + +func (Plugin) Name() string { return pluginName } +func (Plugin) Version() plugin.Version { return pluginVersion } +func (Plugin) SupportedProjectVersions() []config.Version { return supportedProjectVersions } +func (p Plugin) GetInitSubcommand() plugin.InitSubcommand { return &p.initSubcommand } +func (p Plugin) GetCreateAPISubcommand() plugin.CreateAPISubcommand { return &p.createAPISubcommand } diff --git a/internal/plugins/ansible/v1/scaffolds/api.go b/internal/plugins/ansible/v1/scaffolds/api.go new file mode 100644 index 0000000..faeb718 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/api.go @@ -0,0 +1,108 @@ +/* +Copyright 2019 The Kubernetes Authors. +Modifications copyright 2020 The Operator-SDK Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scaffolds + +import ( + "sigs.k8s.io/kubebuilder/v3/pkg/config" + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + "sigs.k8s.io/kubebuilder/v3/pkg/model/resource" + "sigs.k8s.io/kubebuilder/v3/pkg/plugins" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks" + ansibleroles "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/roles" +) + +var _ plugins.Scaffolder = &apiScaffolder{} + +type apiScaffolder struct { + fs machinery.Filesystem + + config config.Config + resource resource.Resource + + doRole, doPlaybook bool +} + +// NewCreateAPIScaffolder returns a new plugins.Scaffolder for project initialization operations +func NewCreateAPIScaffolder(cfg config.Config, res resource.Resource, doRole, doPlaybook bool) plugins.Scaffolder { + return &apiScaffolder{ + config: cfg, + resource: res, + doRole: doRole, + doPlaybook: doPlaybook, + } +} + +// InjectFS implements plugins.Scaffolder +func (s *apiScaffolder) InjectFS(fs machinery.Filesystem) { + s.fs = fs +} + +// Scaffold implements plugins.Scaffolder +func (s *apiScaffolder) Scaffold() error { + if err := s.config.UpdateResource(s.resource); err != nil { + return err + } + + // Initialize the machinery.Scaffold that will write the files to disk + scaffold := 
machinery.NewScaffold(s.fs, + // NOTE: kubebuilder's default permissions are only for root users + machinery.WithDirectoryPermissions(0755), + machinery.WithFilePermissions(0644), + machinery.WithConfig(s.config), + machinery.WithResource(&s.resource), + ) + + createAPITemplates := []machinery.Builder{ + &rbac.ManagerRoleUpdater{}, + &crd.CRD{}, + &crd.Kustomization{}, + &templates.WatchesUpdater{ + GeneratePlaybook: s.doPlaybook, + GenerateRole: s.doRole, + PlaybooksDir: constants.PlaybooksDir, + }, + &mdefault.ResourceTest{}, + } + + if s.doRole { + createAPITemplates = append(createAPITemplates, + &ansibleroles.TasksMain{}, + &ansibleroles.DefaultsMain{}, + &ansibleroles.RoleFiles{}, + &ansibleroles.HandlersMain{}, + &ansibleroles.MetaMain{}, + &ansibleroles.RoleTemplates{}, + &ansibleroles.VarsMain{}, + &ansibleroles.Readme{}, + ) + } + + if s.doPlaybook { + createAPITemplates = append(createAPITemplates, + &playbooks.Playbook{GenerateRole: s.doRole}, + ) + } + + return scaffold.Execute(createAPITemplates...) +} diff --git a/internal/plugins/ansible/v1/scaffolds/init.go b/internal/plugins/ansible/v1/scaffolds/init.go new file mode 100644 index 0000000..b6c7588 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/init.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Kubernetes Authors. +Modifications copyright 2020 The Operator-SDK Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scaffolds + +import ( + "sigs.k8s.io/kubebuilder/v3/pkg/config" + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + "sigs.k8s.io/kubebuilder/v3/pkg/plugins" + kustomizev2Alpha "sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v2-alpha" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks" + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/scaffolds/internal/templates/roles" + "github.com/operator-framework/ansible-operator-plugins/internal/version" +) + +const imageName = "controller:latest" + +// ansibleOperatorVersion is set to the version of ansible-operator at compile-time. 
+var ansibleOperatorVersion = version.ImageVersion + +var _ plugins.Scaffolder = &initScaffolder{} + +type initScaffolder struct { + fs machinery.Filesystem + + config config.Config +} + +// NewInitScaffolder returns a new plugins.Scaffolder for project initialization operations +func NewInitScaffolder(config config.Config) plugins.Scaffolder { + return &initScaffolder{ + config: config, + } +} + +// InjectFS implements plugins.Scaffolder +func (s *initScaffolder) InjectFS(fs machinery.Filesystem) { + s.fs = fs +} + +// Scaffold implements plugins.Scaffolder +func (s *initScaffolder) Scaffold() error { + // Initialize the machinery.Scaffold that will write the files to disk + scaffold := machinery.NewScaffold(s.fs, + // NOTE: kubebuilder's default permissions are only for root users + machinery.WithDirectoryPermissions(0755), + machinery.WithFilePermissions(0644), + machinery.WithConfig(s.config), + ) + + return scaffold.Execute( + &templates.Dockerfile{AnsibleOperatorVersion: ansibleOperatorVersion}, + &templates.Makefile{ + Image: imageName, + KustomizeVersion: kustomizev2Alpha.KustomizeVersion, + AnsibleOperatorVersion: ansibleOperatorVersion, + }, + &templates.GitIgnore{}, + &templates.RequirementsYml{}, + &templates.Watches{}, + &rbac.ManagerRole{}, + &roles.Placeholder{}, + &playbooks.Placeholder{}, + &mdefault.Converge{}, + &mdefault.Create{}, + &mdefault.Destroy{}, + &mdefault.Kustomize{}, + &mdefault.Molecule{}, + &mdefault.Prepare{}, + &mdefault.Verify{}, + &mkind.Converge{}, + &mkind.Create{}, + &mkind.Destroy{}, + &mkind.Molecule{}, + &pullpolicy.AlwaysPullPatch{}, + &pullpolicy.IfNotPresentPullPatch{}, + &pullpolicy.NeverPullPatch{}, + &testing.DebugLogsPatch{}, + &testing.Kustomization{}, + &testing.ManagerImage{}, + ) +} diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/crd.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/crd.go new file mode 100644 index 0000000..85f81da --- /dev/null +++ 
b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/crd.go @@ -0,0 +1,107 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crd + +import ( + "fmt" + "path/filepath" + + "github.com/kr/text" + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &CRD{} + +// CRD scaffolds a manifest for CRD sample. +type CRD struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *CRD) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "crd", "bases", fmt.Sprintf("%s_%%[plural].yaml", f.Resource.QualifiedGroup())) + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.IfExistsAction = machinery.Error + + f.TemplateBody = fmt.Sprintf(crdTemplate, + text.Indent(openAPIV3SchemaTemplate, " "), + text.Indent(openAPIV3SchemaTemplate, " "), + ) + + return nil +} + +const crdTemplate = `--- +apiVersion: apiextensions.k8s.io/{{ .Resource.API.CRDVersion }} +kind: CustomResourceDefinition +metadata: + name: {{ .Resource.Plural }}.{{ .Resource.QualifiedGroup }} +spec: + group: {{ .Resource.QualifiedGroup }} + names: + kind: {{ .Resource.Kind }} + listKind: {{ .Resource.Kind }}List + plural: {{ .Resource.Plural }} + singular: {{ .Resource.Kind | lower }} + scope: Namespaced +{{- if eq .Resource.API.CRDVersion "v1beta1" }} + subresources: + status: {} + validation: +%s +{{- end }} + 
versions: + - name: {{ .Resource.Version }} +{{- if eq .Resource.API.CRDVersion "v1" }} + schema: +%s +{{- end }} + served: true + storage: true +{{- if eq .Resource.API.CRDVersion "v1" }} + subresources: + status: {} +{{- end }} +` + +const openAPIV3SchemaTemplate = `openAPIV3Schema: + description: {{ .Resource.Kind }} is the Schema for the {{ .Resource.Plural }} API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of {{ .Resource.Kind }} + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of {{ .Resource.Kind }} + type: object + x-kubernetes-preserve-unknown-fields: true + type: object +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/kustomization.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/kustomization.go new file mode 100644 index 0000000..871c00e --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/crd/kustomization.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Kubernetes Authors. +Modifications copyright 2020 The Operator-SDK Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var ( + _ machinery.Template = &Kustomization{} + _ machinery.Inserter = &Kustomization{} +) + +// Kustomization scaffolds the kustomization file in manager folder. +type Kustomization struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Kustomization) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "crd", "kustomization.yaml") + } + + f.TemplateBody = fmt.Sprintf(kustomizationTemplate, machinery.NewMarkerFor(f.Path, resourceMarker)) + + return nil +} + +const ( + resourceMarker = "crdkustomizeresource" +) + +// GetMarkers implements machinery.Inserter +func (f *Kustomization) GetMarkers() []machinery.Marker { + return []machinery.Marker{ + machinery.NewMarkerFor(f.Path, resourceMarker), + } +} + +const ( + resourceCodeFragment = `- bases/%s_%s.yaml +` +) + +// GetCodeFragments implements machinery.Inserter +func (f *Kustomization) GetCodeFragments() machinery.CodeFragmentsMap { + return machinery.CodeFragmentsMap{ + // Generate resource code fragments + machinery.NewMarkerFor(f.Path, resourceMarker): []string{ + fmt.Sprintf(resourceCodeFragment, f.Resource.QualifiedGroup(), f.Resource.Plural), + }, + } +} + +var kustomizationTemplate = `# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. 
+# It should be run by config/default +resources: +%s +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac/role.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac/role.go new file mode 100644 index 0000000..e7cc3be --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/rbac/role.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rbac + +import ( + "bytes" + "fmt" + "path/filepath" + "text/template" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &ManagerRole{} + +var defaultRoleFile = filepath.Join("config", "rbac", "role.yaml") + +// ManagerRole scaffolds the role.yaml file +type ManagerRole struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *ManagerRole) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = defaultRoleFile + } + + f.TemplateBody = fmt.Sprintf(roleTemplate, machinery.NewMarkerFor(f.Path, rulesMarker)) + + return nil +} + +var _ machinery.Inserter = &ManagerRoleUpdater{} + +type ManagerRoleUpdater struct { + machinery.ResourceMixin + + SkipDefaultRules bool +} + +func (*ManagerRoleUpdater) GetPath() string { + return defaultRoleFile +} + +func (*ManagerRoleUpdater) GetIfExistsAction() machinery.IfExistsAction { + return machinery.OverwriteFile +} + +const ( + rulesMarker = "rules" +) + +func (f 
*ManagerRoleUpdater) GetMarkers() []machinery.Marker { + return []machinery.Marker{ + machinery.NewMarkerFor(defaultRoleFile, rulesMarker), + } +} + +func (f *ManagerRoleUpdater) GetCodeFragments() machinery.CodeFragmentsMap { + fragments := make(machinery.CodeFragmentsMap, 1) + + // If resource is not being provided we are creating the file, not updating it + if f.Resource == nil { + return fragments + } + + buf := &bytes.Buffer{} + tmpl := template.Must(template.New("rules").Parse(rulesFragment)) + err := tmpl.Execute(buf, f) + if err != nil { + panic(err) + } + + // Generate rule fragment + rules := []string{buf.String()} + + if len(rules) != 0 { + fragments[machinery.NewMarkerFor(defaultRoleFile, rulesMarker)] = rules + } + return fragments +} + +const roleTemplate = `--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: + ## + ## Base operator rules + ## + - apiGroups: + - "" + resources: + - secrets + - pods + - pods/exec + - pods/log + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +%s +` + +const rulesFragment = ` ## + ## Rules for {{.Resource.QualifiedGroup}}/{{.Resource.Version}}, Kind: {{.Resource.Kind}} + ## + - apiGroups: + - {{.Resource.QualifiedGroup}} + resources: + - {{.Resource.Plural}} + - {{.Resource.Plural}}/status + - {{.Resource.Plural}}/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/debug_logs_patch.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/debug_logs_patch.go new file mode 100644 index 0000000..6db9b26 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/debug_logs_patch.go @@ -0,0 
+1,58 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testing + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &DebugLogsPatch{} + +// DebugLogsPatch scaffolds the patch file for enabling +// verbose logs during Ansible testing +type DebugLogsPatch struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *DebugLogsPatch) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "debug_logs_patch.yaml") + } + + f.TemplateBody = debugLogsPatchTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const debugLogsPatchTemplate = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + env: + - name: ANSIBLE_DEBUG_LOGS + value: "TRUE" +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/kustomization.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/kustomization.go new file mode 100644 index 0000000..95bbd09 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/kustomization.go @@ -0,0 +1,67 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testing + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Kustomization{} + +// Kustomization scaffolds the kustomization file for use +// during Ansible testing +type Kustomization struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Kustomization) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "kustomization.yaml") + } + + f.TemplateBody = KustomizationTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const KustomizationTemplate = `# Adds namespace to all resources. +namespace: osdk-test + +namePrefix: osdk- + +# Labels to add to all resources and selectors. 
+#commonLabels: +# someName: someValue + +patchesStrategicMerge: +- manager_image.yaml +- debug_logs_patch.yaml +- ../default/manager_auth_proxy_patch.yaml + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../crd +- ../rbac +- ../manager +images: +- name: testing + newName: testing-operator +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/manager_image.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/manager_image.go new file mode 100644 index 0000000..5940651 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/manager_image.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testing + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &ManagerImage{} + +// ManagerImage scaffolds the patch file for overriding the +// default image during Ansible testing +type ManagerImage struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *ManagerImage) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "manager_image.yaml") + } + + f.TemplateBody = managerImageTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const managerImageTemplate = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + image: testing +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/always_pull_patch.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/always_pull_patch.go new file mode 100644 index 0000000..a84c7bd --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/always_pull_patch.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pullpolicy + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &AlwaysPullPatch{} + +// AlwaysPullPatch scaffolds the patch file for overriding the +// default image pull policy during Ansible testing +type AlwaysPullPatch struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *AlwaysPullPatch) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "pull_policy", "Always.yaml") + } + + f.TemplateBody = alwaysPullPatchTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const alwaysPullPatchTemplate = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/ifnotpresent_pull_patch.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/ifnotpresent_pull_patch.go new file mode 100644 index 0000000..6cee22a --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/ifnotpresent_pull_patch.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pullpolicy + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &IfNotPresentPullPatch{} + +// IfNotPresentPullPatch scaffolds the patch file for overriding the +// default image pull policy during Ansible testing +type IfNotPresentPullPatch struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *IfNotPresentPullPatch) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "pull_policy", "IfNotPresent.yaml") + } + + f.TemplateBody = ifNotPresentPullPatchTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const ifNotPresentPullPatchTemplate = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: IfNotPresent +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/never_pull_patch.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/never_pull_patch.go new file mode 100644 index 0000000..197b661 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/config/testing/pullpolicy/never_pull_patch.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pullpolicy + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &NeverPullPatch{} + +// NeverPullPatch scaffolds the patch file for overriding the +// default image pull policy during Ansible testing +type NeverPullPatch struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *NeverPullPatch) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("config", "testing", "pull_policy", "Never.yaml") + } + + f.TemplateBody = neverPullPatchTemplate + + f.IfExistsAction = machinery.Error + + return nil +} + +const neverPullPatchTemplate = `--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Never +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/dockerfile.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/dockerfile.go new file mode 100644 index 0000000..5227ec1 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/dockerfile.go @@ -0,0 +1,66 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package templates + +import ( + "errors" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &Dockerfile{} + +// Dockerfile scaffolds a Dockerfile for building a main +type Dockerfile struct { + machinery.TemplateMixin + + // AnsibleOperatorVersion is the version of the Dockerfile's base image. + AnsibleOperatorVersion string + + // These variables are always overwritten. + RolesDir string + PlaybooksDir string +} + +// SetTemplateDefaults implements machinery.Template +func (f *Dockerfile) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = "Dockerfile" + } + + f.TemplateBody = dockerfileTemplate + + if f.AnsibleOperatorVersion == "" { + return errors.New("ansible-operator version is required in scaffold") + } + + f.RolesDir = constants.RolesDir + f.PlaybooksDir = constants.PlaybooksDir + + return nil +} + +const dockerfileTemplate = `FROM quay.io/operator-framework/ansible-operator:{{ .AnsibleOperatorVersion }} + +COPY requirements.yml ${HOME}/requirements.yml +RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \ + && chmod -R ug+rwx ${HOME}/.ansible + +COPY watches.yaml ${HOME}/watches.yaml +COPY {{ .RolesDir }}/ ${HOME}/{{ .RolesDir }}/ +COPY {{ .PlaybooksDir }}/ ${HOME}/{{ .PlaybooksDir }}/ +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/gitignore.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/gitignore.go new file mode 100644 index 0000000..0d7290b --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/gitignore.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. +Modifications copyright 2020 The Operator-SDK Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &GitIgnore{} + +// GitIgnore scaffolds the .gitignore file +type GitIgnore struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *GitIgnore) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = ".gitignore" + } + + f.TemplateBody = gitignoreTemplate + + return nil +} + +const gitignoreTemplate = ` +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/makefile.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/makefile.go new file mode 100644 index 0000000..07ffb2a --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/makefile.go @@ -0,0 +1,176 @@ +/* +Copyright 2019 The Kubernetes Authors. +Modifications copyright 2020 The Operator-SDK Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package templates + +import ( + "errors" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Makefile{} + +// Makefile scaffolds the Makefile +type Makefile struct { + machinery.TemplateMixin + + // Image is controller manager image name + Image string + + // Kustomize version to use in the project + KustomizeVersion string + + // AnsibleOperatorVersion is the version of the ansible-operator binary downloaded by the Makefile. + AnsibleOperatorVersion string +} + +// SetTemplateDefaults implements machinery.Template +func (f *Makefile) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = "Makefile" + } + + f.TemplateBody = makefileTemplate + + f.IfExistsAction = machinery.Error + + if f.Image == "" { + f.Image = "controller:latest" + } + + if f.KustomizeVersion == "" { + return errors.New("kustomize version is required in scaffold") + } + + if f.AnsibleOperatorVersion == "" { + return errors.New("ansible-operator version is required in scaffold") + } + + return nil +} + +const makefileTemplate = ` +# Image URL to use all building/pushing image targets +IMG ?= {{ .Image }} + +.PHONY: all +all: docker-build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. 
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +.PHONY: run +ANSIBLE_ROLES_PATH?="$(shell pwd)/roles" +run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config + $(ANSIBLE_OPERATOR) run + +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + docker build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + docker push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> than the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 
+ - docker buildx rm project-v3-builder + rm Dockerfile.cross + +##@ Deployment + +.PHONY: install +install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +.PHONY: uninstall +uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +.PHONY: deploy +deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/default | kubectl delete -f - + +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') + +.PHONY: kustomize +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: ## Download kustomize locally if necessary. +ifeq (,$(wildcard $(KUSTOMIZE))) +ifeq (,$(shell which kustomize 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(KUSTOMIZE)) ;\ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/{{ .KustomizeVersion }}/kustomize_{{ .KustomizeVersion }}_$(OS)_$(ARCH).tar.gz | \ + tar xzf - -C bin/ ;\ + } +else +KUSTOMIZE = $(shell which kustomize) +endif +endif + +.PHONY: ansible-operator +ANSIBLE_OPERATOR = $(shell pwd)/bin/ansible-operator +ansible-operator: ## Download ansible-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist. 
+ifeq (,$(wildcard $(ANSIBLE_OPERATOR))) +ifeq (,$(shell which ansible-operator 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\ + curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/ansible-operator-plugins/releases/download/{{ .AnsibleOperatorVersion }}/ansible-operator_$(OS)_$(ARCH) ;\ + chmod +x $(ANSIBLE_OPERATOR) ;\ + } +else +ANSIBLE_OPERATOR = $(shell which ansible-operator) +endif +endif +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/converge.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/converge.go new file mode 100644 index 0000000..fdc1961 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/converge.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Converge{} + +// Converge scaffolds a Converge for building a main +type Converge struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Converge) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "converge.yml") + } + f.TemplateBody = convergeTemplate + return nil +} + +const convergeTemplate = `--- +- name: Converge + hosts: localhost + connection: local + gather_facts: no + collections: + - kubernetes.core + + tasks: + - name: Create Namespace + k8s: + api_version: v1 + kind: Namespace + name: '{{ "{{ namespace }}" }}' + + - import_tasks: kustomize.yml + vars: + state: present +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/create.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/create.go new file mode 100644 index 0000000..03f254d --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/create.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Create{} + +// Create scaffolds a Create for building a main +type Create struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Create) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "create.yml") + } + f.TemplateBody = createTemplate + return nil +} + +const createTemplate = `--- +- name: Create + hosts: localhost + connection: local + gather_facts: false + tasks: [] +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/destroy.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/destroy.go new file mode 100644 index 0000000..472cf21 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/destroy.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Destroy{} + +// Destroy scaffolds a Destroy for building a main +type Destroy struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Destroy) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "destroy.yml") + } + f.TemplateBody = destroyTemplate + return nil +} + +const destroyTemplate = `--- +- name: Destroy + hosts: localhost + connection: local + gather_facts: false + collections: + - kubernetes.core + + tasks: + - import_tasks: kustomize.yml + vars: + state: absent + + - name: Destroy Namespace + k8s: + api_version: v1 + kind: Namespace + name: '{{ "{{ namespace }}" }}' + state: absent + + - name: Unset pull policy + command: '{{ "{{ kustomize }}" }} edit remove patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/kustomize.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/kustomize.go new file mode 100644 index 0000000..24f9c4f --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/kustomize.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Kustomize{} + +// Kustomize scaffolds a Kustomize for building a main +type Kustomize struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Kustomize) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "kustomize.yml") + } + f.TemplateBody = kustomizeTemplate + return nil +} + +const kustomizeTemplate = `--- +- name: Build kustomize testing overlay + # load_restrictor must be set to none so we can load patch files from the default overlay + command: '{{ "{{ kustomize }}" }} build --load-restrictor LoadRestrictionsNone' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' + register: resources + changed_when: false + +- name: Set resources to {{ "{{ state }}" }} + k8s: + definition: '{{ "{{ item }}" }}' + state: '{{ "{{ state }}" }}' + wait: no + loop: '{{ "{{ resources.stdout | from_yaml_all | list }}" }}' + +- name: Wait for resources to get to {{ "{{ state }}" }} + k8s: + definition: '{{ "{{ item }}" }}' + state: '{{ "{{ state }}" }}' + wait: yes + loop: '{{ "{{ resources.stdout | from_yaml_all | list }}" }}' +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/molecule.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/molecule.go new file mode 100644 index 0000000..e35353d --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/molecule.go @@ -0,0 +1,66 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Molecule{} + +// Molecule scaffolds a Molecule for building a main +type Molecule struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Molecule) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "molecule.yml") + } + f.TemplateBody = moleculeTemplate + return nil +} + +const moleculeTemplate = `--- +dependency: + name: galaxy +driver: + name: delegated +platforms: + - name: cluster + groups: + - k8s +provisioner: + name: ansible + inventory: + group_vars: + all: + namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test} + host_vars: + localhost: + ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}' + config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config + samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples + operator_image: ${OPERATOR_IMAGE:-""} + operator_pull_policy: ${OPERATOR_PULL_POLICY:-"Always"} + kustomize: ${KUSTOMIZE_PATH:-kustomize} + env: + K8S_AUTH_KUBECONFIG: ${KUBECONFIG:-"~/.kube/config"} +verifier: + name: ansible +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/prepare.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/prepare.go new file mode 100644 index 0000000..32ce9de --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/prepare.go @@ -0,0 +1,67 @@ +// Copyright 2020 The 
Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Prepare{} + +// Prepare scaffolds a Prepare for building a main +type Prepare struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Prepare) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "prepare.yml") + } + f.TemplateBody = prepareTemplate + return nil +} + +const prepareTemplate = `--- +- name: Prepare + hosts: localhost + connection: local + gather_facts: false + + tasks: + - name: Ensure operator image is set + fail: + msg: | + You must specify the OPERATOR_IMAGE environment variable in order to run the + 'default' scenario + when: not operator_image + + - name: Set testing image + command: '{{ "{{ kustomize }}" }} edit set image testing={{ "{{ operator_image }}" }}' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' + + - name: Set pull policy + command: '{{ "{{ kustomize }}" }} edit add patch --path pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' + + - name: Set testing namespace + command: '{{ "{{ kustomize }}" }} edit set namespace {{ "{{ namespace }}" }}' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' +` diff --git 
a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/tasks_test_resource.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/tasks_test_resource.go new file mode 100644 index 0000000..bfb1346 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/tasks_test_resource.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &ResourceTest{} + +// ResourceTest scaffolds a ResourceTest for building a main +type ResourceTest struct { + machinery.TemplateMixin + machinery.ResourceMixin + SampleFile string +} + +// SetTemplateDefaults implements machinery.Template +func (f *ResourceTest) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "tasks", "%[kind]_test.yml") + f.Path = f.Resource.Replacer().Replace(f.Path) + } + f.SampleFile = f.Resource.Replacer().Replace("%[group]_%[version]_%[kind].yaml") + + f.TemplateBody = resourceTestTemplate + return nil +} + +const resourceTestTemplate = `--- +- name: Create the {{ .Resource.QualifiedGroup }}/{{ .Resource.Version }}.{{ .Resource.Kind }} + k8s: + state: present + namespace: '{{ "{{ namespace }}" }}' + definition: "{{ "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}" }}" + 
wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + vars: + cr_file: '{{ .SampleFile }}' + +- name: Add assertions here + assert: + that: false + fail_msg: FIXME Add real assertions for your operator +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/verify.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/verify.go new file mode 100644 index 0000000..07ce387 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mdefault/verify.go @@ -0,0 +1,96 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mdefault + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Verify{} + +// Verify scaffolds a Verify for building a main +type Verify struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Verify) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "default", "verify.yml") + } + f.TemplateBody = verifyTemplate + return nil +} + +const verifyTemplate = `--- +- name: Verify + hosts: localhost + connection: local + gather_facts: no + collections: + - kubernetes.core + + vars: + ctrl_label: control-plane=controller-manager + + tasks: + - block: + - name: Import all test files from tasks/ + include_tasks: '{{ "{{ item }}" }}' + with_fileglob: + - tasks/*_test.yml + rescue: + - name: Retrieve relevant resources + k8s_info: + api_version: '{{ "{{ item.api_version }}" }}' + kind: '{{ "{{ item.kind }}" }}' + namespace: '{{ "{{ namespace }}" }}' + loop: + - api_version: v1 + kind: Pod + - api_version: apps/v1 + kind: Deployment + - api_version: v1 + kind: Secret + - api_version: v1 + kind: ConfigMap + register: debug_resources + + - name: Retrieve Pod logs + k8s_log: + name: '{{ "{{ item.metadata.name }}" }}' + namespace: '{{ "{{ namespace }}" }}' + container: manager + loop: "{{ "{{ q('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=ctrl_label) }}" }}" + register: debug_logs + + - name: Output gathered resources + debug: + var: debug_resources + + - name: Output gathered logs + debug: + var: item.log_lines + loop: '{{ "{{ debug_logs.results }}" }}' + + - name: Re-emit failure + vars: + failed_task: + result: '{{ "{{ ansible_failed_result }}" }}' + fail: + msg: '{{ "{{ failed_task }}" }}' +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/converge.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/converge.go new file mode 100644 index 
0000000..3eb3980 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/converge.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mkind + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Converge{} + +// Converge scaffolds a Converge for building a main +type Converge struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Converge) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "kind", "converge.yml") + } + f.TemplateBody = convergeTemplate + return nil +} + +const convergeTemplate = `--- +- name: Converge + hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: Build operator image + docker_image: + build: + path: '{{ "{{ project_dir }}" }}' + pull: no + name: '{{ "{{ operator_image }}" }}' + tag: latest + push: no + source: build + force_source: yes + + - name: Load image into kind cluster + command: kind load docker-image --name osdk-test '{{ "{{ operator_image }}" }}' + register: result + changed_when: '"not yet present" in result.stdout' + +- import_playbook: ../default/converge.yml +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/create.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/create.go new file mode 
100644 index 0000000..56fd8ba --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/create.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mkind + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Create{} + +// Create scaffolds a Create for building a main +type Create struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Create) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "kind", "create.yml") + } + f.TemplateBody = createTemplate + return nil +} + +const createTemplate = `--- +- name: Create + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Create test kind cluster + command: kind create cluster --name osdk-test --kubeconfig {{ "{{ kubeconfig }}" }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/destroy.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/destroy.go new file mode 100644 index 0000000..086a107 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/destroy.go @@ -0,0 +1,55 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mkind + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Destroy{} + +// Destroy scaffolds a Destroy for building a main +type Destroy struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Destroy) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "kind", "destroy.yml") + } + f.TemplateBody = destroyTemplate + return nil +} + +const destroyTemplate = `--- +- name: Destroy + hosts: localhost + connection: local + gather_facts: false + collections: + - kubernetes.core + + tasks: + - name: Destroy test kind cluster + command: kind delete cluster --name osdk-test --kubeconfig {{ "{{ kubeconfig }}" }} + + - name: Unset pull policy + command: '{{ "{{ kustomize }}" }} edit remove patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' + args: + chdir: '{{ "{{ config_dir }}" }}/testing' +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/molecule.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/molecule.go new file mode 100644 index 0000000..d0e6c9d --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/molecule/mkind/molecule.go @@ -0,0 +1,72 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mkind + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Molecule{} + +// Molecule scaffolds a Molecule for building a main +type Molecule struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Molecule) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("molecule", "kind", "molecule.yml") + } + f.TemplateBody = moleculeTemplate + return nil +} + +const moleculeTemplate = `--- +dependency: + name: galaxy +driver: + name: delegated +platforms: + - name: cluster + groups: + - k8s +provisioner: + name: ansible + playbooks: + prepare: ../default/prepare.yml + verify: ../default/verify.yml + inventory: + group_vars: + all: + namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test} + host_vars: + localhost: + ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}' + config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config + samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples + project_dir: ${MOLECULE_PROJECT_DIRECTORY} + operator_image: testing-operator + operator_pull_policy: "Never" + kubeconfig: "{{ "{{ lookup('env', 'KUBECONFIG') }}" }}" + kustomize: ${KUSTOMIZE_PATH:-kustomize} + env: + K8S_AUTH_KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig + KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig +verifier: + name: ansible +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/placeholder.go 
b/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/placeholder.go new file mode 100644 index 0000000..20dc757 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/placeholder.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package playbooks + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Placeholder{} + +type Placeholder struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Placeholder) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("playbooks", ".placeholder") + } + f.TemplateBody = placeholderTemplate + return nil +} + +const placeholderTemplate = `` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/playbook.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/playbook.go new file mode 100644 index 0000000..3e68b7d --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/playbooks/playbook.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package playbooks + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Playbook{} + +type Playbook struct { + machinery.TemplateMixin + machinery.ResourceMixin + + GenerateRole bool +} + +func (f *Playbook) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("playbooks", "%[kind].yml") + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.TemplateBody = playbookTmpl + + return nil +} + +const playbookTmpl = `--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + - operator_sdk.util + + {{- if .GenerateRole }} + tasks: + - import_role: + name: "{{ lower .Resource.Kind }}" + {{- else }} + tasks: [] + {{- end }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/requirements.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/requirements.go new file mode 100644 index 0000000..5679854 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/requirements.go @@ -0,0 +1,46 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package templates + +import ( + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &RequirementsYml{} + +// RequirementsYml - A requirements file for Ansible collection dependencies +type RequirementsYml struct { + machinery.TemplateMixin +} + +func (f *RequirementsYml) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = "requirements.yml" + } + f.TemplateBody = requirementsYmlTmpl + return nil +} + +const requirementsYmlTmpl = `--- +collections: + - name: operator_sdk.util + version: "0.5.0" + - name: kubernetes.core + version: "2.4.0" + - name: cloud.common + version: "2.1.1" + - name: community.docker + version: "3.4.0" +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/defaults_main.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/defaults_main.go new file mode 100644 index 0000000..06a2d1e --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/defaults_main.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &DefaultsMain{} + +type DefaultsMain struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *DefaultsMain) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "defaults", "main.yml") + f.Path = f.Resource.Replacer().Replace(f.Path) + } + f.TemplateBody = defaultsMainAnsibleTmpl + return nil +} + +const defaultsMainAnsibleTmpl = `--- +# defaults file for {{ .Resource.Kind }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/files_dir.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/files_dir.go new file mode 100644 index 0000000..02f607c --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/files_dir.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &RoleFiles{} + +type RoleFiles struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *RoleFiles) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "files", ".placeholder") + f.Path = f.Resource.Replacer().Replace(f.Path) + } + + f.TemplateBody = rolesFilesDirPlaceholder + return nil +} + +const rolesFilesDirPlaceholder = `` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/handlers_main.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/handlers_main.go new file mode 100644 index 0000000..75e8370 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/handlers_main.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &HandlersMain{} + +type HandlersMain struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *HandlersMain) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "handlers", "main.yml") + f.Path = f.Resource.Replacer().Replace(f.Path) + } + + f.TemplateBody = handlersMainAnsibleTmpl + return nil +} + +const handlersMainAnsibleTmpl = `--- +# handlers file for {{ .Resource.Kind }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/meta_main.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/meta_main.go new file mode 100644 index 0000000..3f159e2 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/meta_main.go @@ -0,0 +1,107 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &MetaMain{} + +type MetaMain struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *MetaMain) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "meta", "main.yml") + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.TemplateBody = metaMainAnsibleTmpl + return nil +} + +const metaMainAnsibleTmpl = `--- +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. +collections: +- operator_sdk.util +- kubernetes.core +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/placeholder.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/placeholder.go new file mode 100644 index 0000000..152ffcb --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/placeholder.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Placeholder{} + +type Placeholder struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Placeholder) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join("roles", ".placeholder") + } + f.TemplateBody = placeholderTemplate + return nil +} + +const placeholderTemplate = `` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/readme.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/readme.go new file mode 100644 index 0000000..154a2e6 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/readme.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +const ReadmePath = "README.md" + +var _ machinery.Template = &Readme{} + +type Readme struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +func (f *Readme) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", ReadmePath) + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.TemplateBody = readmeAnsibleTmpl + return nil +} + +const readmeAnsibleTmpl = `Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, +if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in +defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables +that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set +for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for +users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
+` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/tasks_main.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/tasks_main.go new file mode 100644 index 0000000..760f3d7 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/tasks_main.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &TasksMain{} + +type TasksMain struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *TasksMain) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "tasks", "main.yml") + f.Path = f.Resource.Replacer().Replace(f.Path) + } + + f.TemplateBody = tasksMainAnsibleTmpl + return nil +} + +const tasksMainAnsibleTmpl = `--- +# tasks file for {{ .Resource.Kind }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/templates_dir.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/templates_dir.go new file mode 100644 index 0000000..cd63197 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/templates_dir.go @@ -0,0 +1,43 @@ +// 
Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &RoleTemplates{} + +type RoleTemplates struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *RoleTemplates) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "templates", ".placeholder") + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.TemplateBody = templatesDirPlaceholder + return nil +} + +const templatesDirPlaceholder = `` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/vars_main.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/vars_main.go new file mode 100644 index 0000000..537c42c --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/roles/vars_main.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package roles + +import ( + "path/filepath" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" + + "github.com/operator-framework/ansible-operator-plugins/internal/plugins/ansible/v1/constants" +) + +var _ machinery.Template = &VarsMain{} + +type VarsMain struct { + machinery.TemplateMixin + machinery.ResourceMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *VarsMain) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = filepath.Join(constants.RolesDir, "%[kind]", "vars", "main.yml") + } + f.Path = f.Resource.Replacer().Replace(f.Path) + + f.TemplateBody = varsMainAnsibleTmpl + return nil +} + +const varsMainAnsibleTmpl = `--- +# vars file for {{ .Resource.Kind }} +` diff --git a/internal/plugins/ansible/v1/scaffolds/internal/templates/watches.go b/internal/plugins/ansible/v1/scaffolds/internal/templates/watches.go new file mode 100644 index 0000000..4fbe188 --- /dev/null +++ b/internal/plugins/ansible/v1/scaffolds/internal/templates/watches.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package templates + +import ( + "bytes" + "fmt" + "text/template" + + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +var _ machinery.Template = &Watches{} + +const ( + defaultWatchesFile = "watches.yaml" + watchMarker = "watch" +) + +// Watches scaffolds the watches.yaml file +type Watches struct { + machinery.TemplateMixin +} + +// SetTemplateDefaults implements machinery.Template +func (f *Watches) SetTemplateDefaults() error { + if f.Path == "" { + f.Path = defaultWatchesFile + } + + f.TemplateBody = fmt.Sprintf(watchesTemplate, + machinery.NewMarkerFor(f.Path, watchMarker), + ) + return nil +} + +var _ machinery.Inserter = &WatchesUpdater{} + +type WatchesUpdater struct { + machinery.ResourceMixin + + GeneratePlaybook bool + GenerateRole bool + PlaybooksDir string +} + +func (*WatchesUpdater) GetPath() string { + return defaultWatchesFile +} + +func (*WatchesUpdater) GetIfExistsAction() machinery.IfExistsAction { + return machinery.OverwriteFile +} + +func (f *WatchesUpdater) GetMarkers() []machinery.Marker { + return []machinery.Marker{ + machinery.NewMarkerFor(defaultWatchesFile, watchMarker), + } +} + +func (f *WatchesUpdater) GetCodeFragments() machinery.CodeFragmentsMap { + fragments := make(machinery.CodeFragmentsMap, 1) + + // If resource is not being provided we are creating the file, not updating it + if f.Resource == nil { + return fragments + } + + // Generate watch fragments + watches := make([]string, 0) + buf := &bytes.Buffer{} + + // TODO(asmacdo) Move template execution into a function, executed by the apiScaffolder.scaffold() + // DefaultFuncMap used provide the function "lower", used in the watch fragment. 
+ tmpl := template.Must(template.New("rules").Funcs(machinery.DefaultFuncMap()).Parse(watchFragment)) + err := tmpl.Execute(buf, f) + if err != nil { + panic(err) + } + watches = append(watches, buf.String()) + + if len(watches) != 0 { + fragments[machinery.NewMarkerFor(defaultWatchesFile, watchMarker)] = watches + } + return fragments +} + +const watchesTemplate = `--- +# Use the 'create api' subcommand to add watches to this file. +%s +` + +const watchFragment = `- version: {{ .Resource.Version }} + group: {{ .Resource.QualifiedGroup }} + kind: {{ .Resource.Kind }} + {{- if .GeneratePlaybook }} + playbook: {{ .PlaybooksDir }}/{{ lower .Resource.Kind }}.yml + {{- else if .GenerateRole}} + role: {{ lower .Resource.Kind }} + {{- else }} + # FIXME: Specify the role or playbook for this resource. + {{- end }} +` diff --git a/internal/plugins/plugins.go b/internal/plugins/plugins.go new file mode 100644 index 0000000..722ee14 --- /dev/null +++ b/internal/plugins/plugins.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package plugins + +// DefaultNameQualifier is the Operator SDK plugin name suffix. Appending +// this suffix to a short name, ex. "go", makes it fully qualified. 
+const DefaultNameQualifier = ".sdk.operatorframework.io" diff --git a/internal/plugins/util/cleanup.go b/internal/plugins/util/cleanup.go new file mode 100644 index 0000000..b704ff4 --- /dev/null +++ b/internal/plugins/util/cleanup.go @@ -0,0 +1,208 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + log "github.com/sirupsen/logrus" +) + +// RemoveKustomizeCRDManifests removes items in config/crd relating to CRD conversion webhooks. +func RemoveKustomizeCRDManifests() error { + + pathsToRemove := []string{ + filepath.Join("config", "crd", "kustomizeconfig.yaml"), + } + configPatchesDir := filepath.Join("config", "crd", "patches") + webhookPatchMatches, err := filepath.Glob(filepath.Join(configPatchesDir, "webhook_in_*.yaml")) + if err != nil { + return err + } + pathsToRemove = append(pathsToRemove, webhookPatchMatches...) + cainjectionPatchMatches, err := filepath.Glob(filepath.Join(configPatchesDir, "cainjection_in_*.yaml")) + if err != nil { + return err + } + pathsToRemove = append(pathsToRemove, cainjectionPatchMatches...) 
+ for _, p := range pathsToRemove { + if err := os.RemoveAll(p); err != nil { + return err + } + } + children, err := os.ReadDir(configPatchesDir) + if err == nil && len(children) == 0 { + if err := os.RemoveAll(configPatchesDir); err != nil { + return err + } + } + return nil +} + +// UpdateKustomizationsInit updates certain parts of or removes entire kustomization.yaml files +// that are either not used by certain Init plugins or are created by preceding Init plugins. +func UpdateKustomizationsInit() error { + + defaultKFile := filepath.Join("config", "default", "kustomization.yaml") + if err := kbutil.ReplaceInFile(defaultKFile, + ` +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager`, ""); err != nil { + return fmt.Errorf("remove %s resources: %v", defaultKFile, err) + } + + if err := kbutil.ReplaceInFile(defaultKFile, + ` +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# 
fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +`, ""); err != nil { + return fmt.Errorf("remove %s patch and vars blocks: %v", defaultKFile, err) + } + + return nil +} + +// UpdateKustomizationsCreateAPI updates certain parts of or removes entire kustomization.yaml files +// that are either not used by certain CreateAPI plugins or are created by preceding CreateAPI plugins. +func UpdateKustomizationsCreateAPI() error { + + crdKFile := filepath.Join("config", "crd", "kustomization.yaml") + if crdKBytes, err := os.ReadFile(crdKFile); err != nil && !errors.Is(err, os.ErrNotExist) { + log.Debugf("Error reading kustomization for substitution: %v", err) + } else if err == nil { + if bytes.Contains(crdKBytes, []byte("[WEBHOOK]")) || bytes.Contains(crdKBytes, []byte("[CERTMANAGER]")) { + if err := os.RemoveAll(crdKFile); err != nil { + log.Debugf("Error removing file prior to scaffold: %v", err) + } + } + } + + return nil +} diff --git a/internal/plugins/util/legacy_keys.go b/internal/plugins/util/legacy_keys.go new file mode 100644 index 0000000..1e0da04 --- /dev/null +++ b/internal/plugins/util/legacy_keys.go @@ -0,0 +1,93 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "errors" + + log "github.com/sirupsen/logrus" + gofunk "github.com/thoas/go-funk" + "sigs.k8s.io/kubebuilder/v3/pkg/config" + cfgv3 "sigs.k8s.io/kubebuilder/v3/pkg/config/v3" +) + +const ( + // The catch-all plugin key for the go/v2+manifests+scorecard plugins. + // Should still be accepted for backwards-compat. + legacyGoPluginKey = "go.sdk.operatorframework.io/v2-alpha" + legacyGoPluginAlphaKey = "go.sdk.operatorframework.io/v2-alpha" + + // Hard-code the latest manifests and scorecard keys here to avoid a circular import. + manifestsKey = "manifests.sdk.operatorframework.io/v2" + scorecardKey = "scorecard.sdk.operatorframework.io/v2" +) + +// Plugin keys that existed when manifests/scorecard keys did not. +var acceptedLayoutKeys = []string{ + "ansible.sdk.operatorframework.io/v1", + "helm.sdk.operatorframework.io/v1", +} + +// UpdateIfLegacyKey returns true if c's "plugins" map or "layout" value contains +// a legacy key that may require this plugin be executed, even if the "manifests" key +// isn't in "plugins". +func UpdateIfLegacyKey(c config.Config) bool { + if c.GetVersion().Compare(cfgv3.Version) < 0 { + return false + } + + if IsGolangLegacyLayout(c) { + return true + } + + chain := c.GetPluginChain() + for _, key := range acceptedLayoutKeys { + if gofunk.ContainsString(chain, key) { + // Encode missing plugin keys. 
+ if !gofunk.ContainsString(chain, manifestsKey) { + if err := c.EncodePluginConfig(manifestsKey, struct{}{}); err != nil { + log.Error(err) + } + } + if !gofunk.ContainsString(chain, scorecardKey) { + if err := c.EncodePluginConfig(scorecardKey, struct{}{}); err != nil { + log.Error(err) + } + } + return true + } + } + + return false +} + +// IsGolangLegacyLayout returns true if c's does not have the plugins +// configuration. +func IsGolangLegacyLayout(c config.Config) bool { + err := c.DecodePluginConfig(legacyGoPluginKey, struct{}{}) + if err == nil || !errors.As(err, &config.PluginKeyNotFoundError{}) { + // There is no way to remove keys from "plugins", so print a warning. + log.Warnf("Plugin key %q is deprecated. Replace this key with %q and %q on separate lines.", + legacyGoPluginKey, manifestsKey, scorecardKey) + return true + } + + err = c.DecodePluginConfig(legacyGoPluginAlphaKey, struct{}{}) + if err == nil || !errors.As(err, &config.PluginKeyNotFoundError{}) { + return true + } + + return false +} diff --git a/internal/plugins/util/message.go b/internal/plugins/util/message.go new file mode 100644 index 0000000..bf151c2 --- /dev/null +++ b/internal/plugins/util/message.go @@ -0,0 +1,20 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +const WarnMessageRemovalV1beta1 = "The v1beta1 API version for CRDs and Webhooks is deprecated and is no longer offered since " + + "Kubernetes 1.22. 
This flag will be removed in a future release. We " + + "recommend that you no longer use the v1beta1 API version" + + "More info: https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-22" diff --git a/internal/testutils/olm.go b/internal/testutils/olm.go new file mode 100644 index 0000000..2846159 --- /dev/null +++ b/internal/testutils/olm.go @@ -0,0 +1,97 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutils + +import ( + "fmt" + "os" + "path/filepath" + + _ "sigs.k8s.io/kubebuilder/v3/pkg/config/v2" // Register config/v2 for `config.New` + _ "sigs.k8s.io/kubebuilder/v3/pkg/config/v3" // Register config/v3 for `config.New` + + "github.com/operator-framework/ansible-operator-plugins/internal/util/projutil" +) + +const ( + OlmVersionForTestSuite = "0.25.0" +) + +var makefilePackagemanifestsFragment = ` +# Options for "packagemanifests". +ifneq ($(origin FROM_VERSION), undefined) +PKG_FROM_VERSION := --from-version=$(FROM_VERSION) +endif +ifneq ($(origin CHANNEL), undefined) +PKG_CHANNELS := --channel=$(CHANNEL) +endif +ifeq ($(IS_CHANNEL_DEFAULT), 1) +PKG_IS_DEFAULT_CHANNEL := --default-channel +endif +PKG_MAN_OPTS ?= $(PKG_FROM_VERSION) $(PKG_CHANNELS) $(PKG_IS_DEFAULT_CHANNEL) + +# Generate package manifests. 
+packagemanifests: kustomize %s + $(OPERATOR_SDK) generate kustomize manifests -q --interactive=false + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate packagemanifests -q --version $(VERSION) $(PKG_MAN_OPTS) +` + +// AddPackagemanifestsTarget will append the packagemanifests target to the makefile +// in order to test the steps described in the docs. +// More info: https://v1-0-x.sdk.operatorframework.io/docs/olm-integration/generation/#package-manifests-formats +func (tc TestContext) AddPackagemanifestsTarget(operatorType projutil.OperatorType) error { + makefileBytes, err := os.ReadFile(filepath.Join(tc.Dir, "Makefile")) + if err != nil { + return err + } + + // add the manifests target when is a Go project. + replaceTarget := "" + if operatorType == projutil.OperatorTypeGo { + replaceTarget = "manifests" + } + makefilePackagemanifestsFragment = fmt.Sprintf(makefilePackagemanifestsFragment, replaceTarget) + + // update makefile by adding the packagemanifests target + makefileBytes = append([]byte(makefilePackagemanifestsFragment), makefileBytes...) + err = os.WriteFile(filepath.Join(tc.Dir, "Makefile"), makefileBytes, 0644) + if err != nil { + return err + } + return nil +} + +// DisableManifestsInteractiveMode will update the Makefile to disable the interactive mode +func (tc TestContext) DisableManifestsInteractiveMode() error { + // Todo: check if we cannot improve it since the replace/content will exists in the + // pkgmanifest target if it be scaffolded before this call + content := "$(OPERATOR_SDK) generate kustomize manifests" + replace := content + " --interactive=false" + return ReplaceInFile(filepath.Join(tc.Dir, "Makefile"), content, replace) +} + +// GenerateBundle runs all commands to create an operator bundle. 
+func (tc TestContext) GenerateBundle() error { + if err := tc.DisableManifestsInteractiveMode(); err != nil { + return err + } + + if err := tc.Make("bundle", "IMG="+tc.ImageName); err != nil { + return err + } + + return nil +} diff --git a/internal/testutils/scorecard.go b/internal/testutils/scorecard.go new file mode 100644 index 0000000..008e212 --- /dev/null +++ b/internal/testutils/scorecard.go @@ -0,0 +1,103 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Modified from https://github.com/kubernetes-sigs/kubebuilder/tree/39224f0/test/e2e/v3 + +package testutils + +import ( + "fmt" + "os" + "path/filepath" + + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" +) + +const scorecardImage = "quay.io/operator-framework/scorecard-test:.*" +const scorecardImageReplace = "quay.io/operator-framework/scorecard-test:dev" + +const customScorecardPatch = ` +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - custom-scorecard-tests + - customtest1 + image: quay.io/operator-framework/custom-scorecard-tests:dev + labels: + suite: custom + test: customtest1 +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - custom-scorecard-tests + - customtest2 + image: quay.io/operator-framework/custom-scorecard-tests:dev + labels: + suite: custom + test: customtest2 +` + +const customScorecardKustomize = ` +- path: patches/custom.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +` + +func (tc TestContext) AddScorecardCustomPatchFile() error { + // drop in the patch file + customScorecardPatchFile := filepath.Join(tc.Dir, "config", "scorecard", "patches", "custom.config.yaml") + patchBytes := []byte(customScorecardPatch) + err := os.WriteFile(customScorecardPatchFile, patchBytes, 0777) + if err != nil { + fmt.Printf("can not write %s %s\n", customScorecardPatchFile, err.Error()) + return err + } + + // append to config/scorecard/kustomization.yaml + kustomizeFile := filepath.Join(tc.Dir, "config", "scorecard", "kustomization.yaml") + f, err := os.OpenFile(kustomizeFile, os.O_APPEND|os.O_WRONLY, 0777) + if err != nil { + fmt.Printf("error in opening scorecard kustomization.yaml file %s\n", err.Error()) + return err + } + defer f.Close() + if _, err := f.WriteString(customScorecardKustomize); err != nil { + fmt.Printf("error in append to scorecard kustomization.yaml %s\n", err.Error()) + return err + } + return nil +} + +// 
ReplaceScorecardImagesForDev will replace the scorecard images in the manifests with the dev tag which is built
+type TestContext struct { + *kbtestutils.TestContext + // BundleImageName store the image to use to build the bundle + BundleImageName string + // ProjectName store the project name + ProjectName string + // isPrometheusManagedBySuite is true when the suite tests is installing/uninstalling the Prometheus + isPrometheusManagedBySuite bool + // isOLMManagedBySuite is true when the suite tests is installing/uninstalling the OLM + isOLMManagedBySuite bool +} + +// NewTestContext returns a TestContext containing a new kubebuilder TestContext. +// Construct if your environment is connected to a live cluster, ex. for e2e tests. +func NewTestContext(binaryName string, env ...string) (tc TestContext, err error) { + if tc.TestContext, err = kbtestutils.NewTestContext(binaryName, env...); err != nil { + return tc, err + } + tc.ProjectName = strings.ToLower(filepath.Base(tc.Dir)) + tc.ImageName = makeImageName(tc.ProjectName) + tc.BundleImageName = makeBundleImageName(tc.ProjectName) + tc.isOLMManagedBySuite = true + tc.isPrometheusManagedBySuite = true + return tc, nil +} + +// NewPartialTestContext returns a TestContext containing a partial kubebuilder TestContext. +// This object needs to be populated with GVK information. The underlying TestContext is +// created directly rather than through a constructor so cluster-based setup is skipped. 
+func NewPartialTestContext(binaryName, dir string, env ...string) (tc TestContext, err error) { + cc := &kbtestutils.CmdContext{ + Env: env, + } + if cc.Dir, err = filepath.Abs(dir); err != nil { + return tc, err + } + projectName := strings.ToLower(filepath.Base(dir)) + + return TestContext{ + TestContext: &kbtestutils.TestContext{ + CmdContext: cc, + BinaryName: binaryName, + ImageName: makeImageName(projectName), + }, + ProjectName: projectName, + BundleImageName: makeBundleImageName(projectName), + }, nil +} + +func makeImageName(projectName string) string { + return fmt.Sprintf("quay.io/example/%s:v0.0.1", projectName) +} + +func makeBundleImageName(projectName string) string { + return fmt.Sprintf("quay.io/example/%s-bundle:v0.0.1", projectName) +} + +// InstallOLMVersion runs 'operator-sdk olm install' for specific version +// and returns any errors emitted by that command. +func (tc TestContext) InstallOLMVersion(version string) error { + cmd := exec.Command(tc.BinaryName, "olm", "install", "--version", version, "--timeout", "4m") + _, err := tc.Run(cmd) + return err +} + +// UninstallOLM runs 'operator-sdk olm uninstall' and logs any errors emitted by that command. +func (tc TestContext) UninstallOLM() { + cmd := exec.Command(tc.BinaryName, "olm", "uninstall") + if _, err := tc.Run(cmd); err != nil { + fmt.Fprintln(GinkgoWriter, "warning: error when uninstalling OLM:", err) + } +} + +// ReplaceInFile replaces all instances of old with new in the file at path. 
+// todo(camilamacedo86): this func can be pushed to upstream/kb +func ReplaceInFile(path, old, new string) error { + info, err := os.Stat(path) + if err != nil { + return err + } + b, err := os.ReadFile(path) + if err != nil { + return err + } + if !strings.Contains(string(b), old) { + return errors.New("unable to find the content to be replaced") + } + s := strings.Replace(string(b), old, new, -1) + err = os.WriteFile(path, []byte(s), info.Mode()) + if err != nil { + return err + } + return nil +} + +// LoadImageToKindClusterWithName loads a local docker image with the name informed to the kind cluster +func (tc TestContext) LoadImageToKindClusterWithName(image string) error { + cluster := "kind" + if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", "--name", cluster, image} + cmd := exec.Command("kind", kindOptions...) + _, err := tc.Run(cmd) + return err +} + +// InstallPrerequisites will install OLM and Prometheus +// when the cluster kind is Kind and when they are not present on the Cluster +func (tc TestContext) InstallPrerequisites() { + By("checking API resources applied on Cluster") + output, err := tc.Kubectl.Command("api-resources") + Expect(err).NotTo(HaveOccurred()) + if strings.Contains(output, "servicemonitors") { + tc.isPrometheusManagedBySuite = false + } + if strings.Contains(output, "clusterserviceversions") { + tc.isOLMManagedBySuite = false + } + + if tc.isPrometheusManagedBySuite { + By("installing Prometheus") + Expect(tc.InstallPrometheusOperManager()).To(Succeed()) + + By("ensuring provisioned Prometheus Manager Service") + Eventually(func() error { + _, err := tc.Kubectl.Get( + false, + "Service", "prometheus-operator") + return err + }, 3*time.Minute, time.Second).Should(Succeed()) + } + + if tc.isOLMManagedBySuite { + By("installing OLM") + Expect(tc.InstallOLMVersion(OlmVersionForTestSuite)).To(Succeed()) + } +} + +// IsRunningOnKind returns true when the tests are executed 
in a Kind Cluster +func (tc TestContext) IsRunningOnKind() (bool, error) { + kubectx, err := tc.Kubectl.Command("config", "current-context") + if err != nil { + return false, err + } + return strings.Contains(kubectx, "kind"), nil +} + +// UninstallPrerequisites will uninstall all prerequisites installed via InstallPrerequisites() +func (tc TestContext) UninstallPrerequisites() { + if tc.isPrometheusManagedBySuite { + By("uninstalling Prometheus") + tc.UninstallPrometheusOperManager() + } + if tc.isOLMManagedBySuite { + By("uninstalling OLM") + tc.UninstallOLM() + } +} + +// WrapWarnOutput is a one-liner to wrap an error from a command that returns (string, error) in a warning. +func WrapWarnOutput(_ string, err error) { + if err != nil { + fmt.Fprintf(GinkgoWriter, "warning: %s", err) + } +} + +// WrapWarn is a one-liner to wrap an error from a command that returns (error) in a warning. +func WrapWarn(err error) { + WrapWarnOutput("", err) +} + +func (tc TestContext) UncommentRestrictivePodStandards() error { + configManager := filepath.Join(tc.Dir, "config", "manager", "manager.yaml") + + if err := kbutil.ReplaceInFile(configManager, `# TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
+ # seccompProfile: + # type: RuntimeDefault`, `seccompProfile: + type: RuntimeDefault`); err == nil { + return err + } + + return nil +} diff --git a/internal/util/bundleutil/bundleutil.go b/internal/util/bundleutil/bundleutil.go new file mode 100644 index 0000000..eb75456 --- /dev/null +++ b/internal/util/bundleutil/bundleutil.go @@ -0,0 +1,281 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bundleutil + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "text/template" + + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + + "github.com/operator-framework/ansible-operator-plugins/internal/flags" + "github.com/operator-framework/ansible-operator-plugins/internal/util/projutil" +) + +var ( + defaultMetadataDir = "metadata" + defaultManifestDir = "manifests" + defaultBundleDockerfilePath = "bundle.Dockerfile" +) + +// BundleMetaData contains the required metadata to build bundles and images. +type BundleMetaData struct { + // BundleDir refers to the directory where generated bundles are to be written. + BundleDir string + + // The PackageName of the operator bundle. + PackageName string + + // Channels and DefaultChannel the operator should be subscribed to. + Channels string + DefaultChannel string + + // BaseImage name to build bundle image. + BaseImage string + + // BuildCommand to run while building image. 
+ BuildCommand string + + // PackageManifestPath where the input manifests are present. + PkgmanifestPath string + + // IsScoreConfigPresent when set to true includes scorecard config annotations + // in bundle metadata. + IsScoreConfigPresent bool + + // Other labels to be added in CSV. + OtherLabels map[string]string +} + +// values to populate bundle metadata/Dockerfile. +type annotationsValues struct { + BundleDir string + PackageName string + Channels string + DefaultChannel string + OtherLabels []string + IsScorecardConfigPresent bool +} + +// GenerateMetadata scaffolds annotations.yaml and bundle.Dockerfile with the provided +// annotation values. +func (meta *BundleMetaData) GenerateMetadata() error { + // Create output directory + if err := os.MkdirAll(meta.BundleDir, projutil.DirMode); err != nil { + return err + } + + // Create annotation values for both bundle.Dockerfile and annotations.yaml, which should + // hold the same set of values always. + values := annotationsValues{ + BundleDir: meta.BundleDir, + PackageName: meta.PackageName, + Channels: meta.Channels, + DefaultChannel: meta.DefaultChannel, + IsScorecardConfigPresent: meta.IsScoreConfigPresent, + } + + for k, v := range meta.OtherLabels { + values.OtherLabels = append(values.OtherLabels, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(values.OtherLabels) + + // Write each file + metadataDir := filepath.Join(meta.BundleDir, defaultMetadataDir) + if err := os.MkdirAll(metadataDir, projutil.DirMode); err != nil { + return err + } + + dockerfilePath := defaultBundleDockerfilePath + // If migrating from packagemanifests to bundle, bundle.Dockerfile is present + // inside bundleDir, else it's in the project directory. Hence, dockerfile + // should have the path specified with respect to output directory of resulting bundles. + // Remove this, when pkgman-to-bundle migrate command is removed. 
+ if len(meta.PkgmanifestPath) != 0 { + dockerfilePath = filepath.Join(filepath.Dir(meta.BundleDir), "bundle.Dockerfile") + values.BundleDir = filepath.Base(meta.BundleDir) + } + + templateMap := map[string]*template.Template{ + dockerfilePath: dockerfileTemplate, + filepath.Join(metadataDir, "annotations.yaml"): annotationsTemplate, + } + + for path, tmpl := range templateMap { + log.Info(fmt.Sprintf("Creating %s", path)) + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + + defer func() { + if err := f.Close(); err != nil { + log.Error(err) + } + }() + if err = tmpl.Execute(f, values); err != nil { + return err + } + } + log.Infof("Bundle metadata generated successfully") + return nil +} + +// CopyOperatorManifests copies packagemanifestsDir/manifests to bundleDir/manifests. +func (meta *BundleMetaData) CopyOperatorManifests() error { + return copyOperatorManifests(meta.PkgmanifestPath, filepath.Join(meta.BundleDir, defaultManifestDir)) +} + +func copyOperatorManifests(src, dest string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return fmt.Errorf("error reading source directory %v", err) + } + + if err := os.MkdirAll(dest, srcInfo.Mode()); err != nil { + return err + } + + srcFiles, err := os.ReadDir(src) + if err != nil { + return err + } + + for _, f := range srcFiles { + srcPath := filepath.Join(src, f.Name()) + destPath := filepath.Join(dest, f.Name()) + + if f.IsDir() { + // TODO(verify): we may have to log an error here instead of recursively copying + // if there are no sub-folders allowed under manifests dir of a packagemanifest. 
+ if err = copyOperatorManifests(srcPath, destPath); err != nil { + return err + } + } else { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + destFile, err := os.Create(destPath) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + } + } + return nil +} + +// BuildBundleImage builds the bundle image with the provided command or using +// docker build command. +func (meta *BundleMetaData) BuildBundleImage(tag string) error { + + img := fmt.Sprintf("%s:%s", meta.BaseImage, tag) + + // switch back to current working directory, so that subsequent + // bundle version images can be built. + cwd, err := os.Getwd() + if err != nil { + return err + } + + defer func() { + if err := os.Chdir(cwd); err != nil { + log.Error(cwd) + } + }() + + if err := os.Chdir(filepath.Dir(meta.BundleDir)); err != nil { + return err + } + + if len(meta.BuildCommand) != 0 { + // TODO(varsha): Make this more user friendly by accepting a template which + // can executed in each bundle subdirectory. + log.Infof("Using the specified command to build image") + commandArg := strings.Split(meta.BuildCommand, " ") + + // append the tag and build context to the command + cmd := exec.Command(commandArg[0], append(commandArg[1:], img)...) + output, err := cmd.CombinedOutput() + if err != nil || viper.GetBool(flags.VerboseOpt) { + fmt.Println(string(output)) + } + if err != nil { + return err + } + } else { + output, err := exec.Command("docker", "build", "-f", "bundle.Dockerfile", "-t", img, ".").CombinedOutput() + if err != nil || viper.GetBool(flags.VerboseOpt) { + fmt.Println(string(output)) + } + if err != nil { + return err + } + } + log.Infof("Successfully built image %s", img) + return nil +} + +// WriteScorecardConfig creates the scorecard directory in the bundle and copies the +// configuration yaml to bundle. 
+func (meta *BundleMetaData) WriteScorecardConfig(inputConfigPath string) error { + // If the config is already copied as a part of the manifest directory + // then ensure that it is deleted to remove duplicates. + _, filename := filepath.Split(inputConfigPath) + if err := deleteExistingScorecardConfig(meta.BundleDir, filename); err != nil { + return err + } + + scorecardDir := filepath.Join(meta.BundleDir, "tests", "scorecard") + + // Create directory for scorecard config + if err := os.MkdirAll(scorecardDir, projutil.DirMode); err != nil { + return err + } + + log.Info(fmt.Sprintf("Writing scorecard config in %s", scorecardDir)) + b, err := os.ReadFile(inputConfigPath) + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(scorecardDir, "config.yaml"), b, 0644) + if err != nil { + return fmt.Errorf("error writing scorecard config %v", err) + } + return nil +} + +// deleteExistingScorecardConfig checks if there is an existing scorecard config file +// in manifests/ folder, if present it deletes it. +func deleteExistingScorecardConfig(bundleDir, filename string) error { + scorecardConfigPath := filepath.Join(bundleDir, defaultManifestDir, filename) + return os.RemoveAll(scorecardConfigPath) +} diff --git a/internal/util/bundleutil/template.go b/internal/util/bundleutil/template.go new file mode 100644 index 0000000..b0a5c64 --- /dev/null +++ b/internal/util/bundleutil/template.go @@ -0,0 +1,79 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bundleutil + +import ( + "strings" + "text/template" +) + +// Transform a Dockerfile label to a YAML kv. +var funcs = template.FuncMap{ + "toYAML": func(s string) string { return strings.ReplaceAll(s, "=", ": ") }, +} + +// Template for bundle.Dockerfile, containing scorecard labels. +var dockerfileTemplate = template.Must(template.New("").Funcs(funcs).Parse(`FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1={{ .PackageName }} +LABEL operators.operatorframework.io.bundle.channels.v1={{ .Channels }} +{{- if .DefaultChannel }} +LABEL operators.operatorframework.io.bundle.channel.default.v1={{ .DefaultChannel }} +{{- end }} +{{- range $i, $l := .OtherLabels }} +LABEL {{ $l }} +{{- end }} + +{{- if .IsScorecardConfigPresent }} + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ +{{- end }} + +# Copy files to locations specified by labels. +COPY {{ .BundleDir }}/manifests /manifests/ +COPY {{ .BundleDir }}/metadata /metadata/ +{{- if .IsScorecardConfigPresent }} +COPY {{ .BundleDir }}/tests/scorecard /tests/scorecard/ +{{- end }} +`)) + +// Template for annotations.yaml, containing scorecard labels. +var annotationsTemplate = template.Must(template.New("").Funcs(funcs).Parse(`annotations: + # Core bundle annotations. 
+ operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: {{ .PackageName }} + operators.operatorframework.io.bundle.channels.v1: {{ .Channels }} + {{- if .DefaultChannel }} + operators.operatorframework.io.bundle.channel.default.v1: {{ .DefaultChannel }} + {{- end }} + {{- range $i, $l := .OtherLabels }} + {{ toYAML $l }} + {{- end }} + + {{- if .IsScorecardConfigPresent }} + + # Annotations for testing. + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + {{- end }} +`)) diff --git a/internal/util/k8sutil/api.go b/internal/util/k8sutil/api.go new file mode 100644 index 0000000..92a3134 --- /dev/null +++ b/internal/util/k8sutil/api.go @@ -0,0 +1,205 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package k8sutil + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "github.com/operator-framework/operator-registry/pkg/registry" + log "github.com/sirupsen/logrus" + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + "sigs.k8s.io/yaml" +) + +// GetCustomResourceDefinitions returns all CRD manifests of both v1 and v1beta1 +// versions in the directory crdsDir. If a duplicate object with different API +// versions is found, and error is returned. +func GetCustomResourceDefinitions(crdsDir string) ( + v1crds []apiextv1.CustomResourceDefinition, + v1beta1crds []apiextv1beta1.CustomResourceDefinition, + err error) { + + infos, err := os.ReadDir(crdsDir) + if err != nil { + return nil, nil, err + } + + // The set of all custom resource GVKs in found CRDs. + crGVKSet := map[schema.GroupVersionKind]struct{}{} + for _, info := range infos { + path := filepath.Join(crdsDir, info.Name()) + + if info.IsDir() { + log.Debugf("Skipping dir: %s", path) + continue + } + + b, err := os.ReadFile(path) + if err != nil { + return nil, nil, fmt.Errorf("error reading manifest %s: %w", path, err) + } + + scanner := NewYAMLScanner(bytes.NewBuffer(b)) + for scanner.Scan() { + manifest := scanner.Bytes() + typeMeta, err := GetTypeMetaFromBytes(manifest) + if err != nil { + log.Debugf("Skipping manifest in %s: %v", path, err) + continue + } + if typeMeta.Kind != "CustomResourceDefinition" { + continue + } + + // Unmarshal based on CRD version. 
+ var crGVKs []schema.GroupVersionKind + switch gvk := typeMeta.GroupVersionKind(); gvk.Version { + case apiextv1.SchemeGroupVersion.Version: + crd := apiextv1.CustomResourceDefinition{} + if err = yaml.Unmarshal(manifest, &crd); err != nil { + return nil, nil, err + } + v1crds = append(v1crds, crd) + crGVKs = append(crGVKs, GVKsForV1CustomResourceDefinitions(crd)...) + case apiextv1beta1.SchemeGroupVersion.Version: + crd := apiextv1beta1.CustomResourceDefinition{} + if err := yaml.Unmarshal(manifest, &crd); err != nil { + return nil, nil, err + } + v1beta1crds = append(v1beta1crds, crd) + crGVKs = append(crGVKs, GVKsForV1beta1CustomResourceDefinitions(crd)...) + default: + return nil, nil, fmt.Errorf("unrecognized CustomResourceDefinition version %q", gvk.Version) + } + + // Check if any GVK in crd is a duplicate. + for _, gvk := range crGVKs { + if _, hasGVK := crGVKSet[gvk]; hasGVK { + return nil, nil, fmt.Errorf("duplicate custom resource GVK %s in %s", gvk, path) + } + crGVKSet[gvk] = struct{}{} + } + + } + if err = scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("error scanning %s: %w", path, err) + } + } + return v1crds, v1beta1crds, nil +} + +// DefinitionsForV1CustomResourceDefinitions returns definition keys for all +// custom resource versions in each crd in crds. +func DefinitionsForV1CustomResourceDefinitions(crds ...apiextv1.CustomResourceDefinition) (keys []registry.DefinitionKey) { + for _, crd := range crds { + for _, ver := range crd.Spec.Versions { + if !ver.Served { + log.Debugf("Not adding unserved CRD %q version %q to set of owned keys", crd.GetName(), ver.Name) + continue + } + keys = append(keys, registry.DefinitionKey{ + Name: crd.GetName(), + Group: crd.Spec.Group, + Version: ver.Name, + Kind: crd.Spec.Names.Kind, + }) + } + } + return keys +} + +// DefinitionsForV1beta1CustomResourceDefinitions returns definition keys for all +// custom resource versions in each crd in crds. 
+func DefinitionsForV1beta1CustomResourceDefinitions(crds ...apiextv1beta1.CustomResourceDefinition) (keys []registry.DefinitionKey) { + for _, crd := range crds { + if len(crd.Spec.Versions) == 0 { + keys = append(keys, registry.DefinitionKey{ + Name: crd.GetName(), + Group: crd.Spec.Group, + Version: crd.Spec.Version, + Kind: crd.Spec.Names.Kind, + }) + } + for _, ver := range crd.Spec.Versions { + if !ver.Served { + log.Debugf("Not adding unserved CRD %q version %q to set of owned keys", crd.GetName(), ver.Name) + continue + } + keys = append(keys, registry.DefinitionKey{ + Name: crd.GetName(), + Group: crd.Spec.Group, + Version: ver.Name, + Kind: crd.Spec.Names.Kind, + }) + } + } + return keys +} + +// GVKsForV1CustomResourceDefinitions returns GroupVersionKind's for all +// custom resource versions in each crd in crds. +func GVKsForV1CustomResourceDefinitions(crds ...apiextv1.CustomResourceDefinition) (gvks []schema.GroupVersionKind) { + for _, key := range DefinitionsForV1CustomResourceDefinitions(crds...) { + gvks = append(gvks, schema.GroupVersionKind{ + Group: key.Group, + Version: key.Version, + Kind: key.Kind, + }) + } + return gvks +} + +// GVKsForV1beta1CustomResourceDefinitions returns GroupVersionKind's for all +// custom resource versions in each crd in crds. +func GVKsForV1beta1CustomResourceDefinitions(crds ...apiextv1beta1.CustomResourceDefinition) (gvks []schema.GroupVersionKind) { + for _, key := range DefinitionsForV1beta1CustomResourceDefinitions(crds...) 
{ + gvks = append(gvks, schema.GroupVersionKind{ + Group: key.Group, + Version: key.Version, + Kind: key.Kind, + }) + } + return gvks +} + +type CRDVersions []apiextv1beta1.CustomResourceDefinitionVersion + +func (vs CRDVersions) Len() int { return len(vs) } +func (vs CRDVersions) Less(i, j int) bool { + return version.CompareKubeAwareVersionStrings(vs[i].Name, vs[j].Name) > 0 +} +func (vs CRDVersions) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } + +func Convertv1beta1Tov1CustomResourceDefinition(in *apiextv1beta1.CustomResourceDefinition) (*apiextv1.CustomResourceDefinition, error) { + var unversioned apiext.CustomResourceDefinition + if err := apiextv1beta1.Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, &unversioned, nil); err != nil { + return nil, err + } + + var out apiextv1.CustomResourceDefinition + out.TypeMeta.APIVersion = apiextv1.SchemeGroupVersion.String() + out.TypeMeta.Kind = "CustomResourceDefinition" + if err := apiextv1.Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(&unversioned, &out, nil); err != nil { + return nil, err + } + return &out, nil +} diff --git a/internal/util/k8sutil/api_test.go b/internal/util/k8sutil/api_test.go new file mode 100644 index 0000000..a666dc6 --- /dev/null +++ b/internal/util/k8sutil/api_test.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package k8sutil + +import ( + "reflect" + "sort" + "testing" + + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +func TestSortVersions(t *testing.T) { + cases := []struct { + inputVersions []string + expected []string + }{ + {[]string{""}, []string{""}}, + {[]string{"v1"}, []string{"v1"}}, + {[]string{"v1alpha1"}, []string{"v1alpha1"}}, + {[]string{"v1alpha1", "v1"}, []string{"v1", "v1alpha1"}}, + { + []string{"foo1", "foo10", "foo2", "foo13", "foo52", "foo23", "foo32", "foo33", "foo100"}, + []string{"foo1", "foo10", "foo100", "foo13", "foo2", "foo23", "foo32", "foo33", "foo52"}, + }, + { + []string{"v1alpha10", "v1alpha1", "v1alpha2000", "v1alpha3", "v1alpha2", "v1alpha300"}, + []string{"v1alpha2000", "v1alpha300", "v1alpha10", "v1alpha3", "v1alpha2", "v1alpha1"}, + }, + { + []string{"v3beta1", "v12alpha1", "v12alpha2", "v10beta3", "v1", "v11alpha2", "foo1", "v10", + "v2", "foo10", "v11beta2"}, + []string{"v10", "v2", "v1", "v11beta2", "v10beta3", "v3beta1", "v12alpha2", "v12alpha1", + "v11alpha2", "foo1", "foo10"}, + }, + } + + for _, c := range cases { + cvs := stringsToCRDVersions(c.inputVersions) + sort.Sort(cvs) + vs := crdVersionsToStrings(cvs) + if !reflect.DeepEqual(vs, c.expected) { + t.Errorf("Output not sorted as expected:\noutput: %+q\nexpected: %+q", vs, c.expected) + } + } +} + +func stringsToCRDVersions(vs []string) (cvs CRDVersions) { + for _, v := range vs { + cvs = append(cvs, apiextv1beta1.CustomResourceDefinitionVersion{ + Name: v, + }) + } + return cvs +} + +func crdVersionsToStrings(cvs []apiextv1beta1.CustomResourceDefinitionVersion) (vs []string) { + for _, v := range cvs { + vs = append(vs, v.Name) + } + return vs +} diff --git a/internal/util/k8sutil/constants.go b/internal/util/k8sutil/constants.go new file mode 100644 index 0000000..87532c2 --- /dev/null +++ b/internal/util/k8sutil/constants.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8sutil + +const ( + // KubeConfigEnvVar defines the env variable KUBECONFIG which + // contains the kubeconfig file path. + KubeConfigEnvVar = "KUBECONFIG" + + // WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE + // which is the namespace where the watch activity happens. + // this value is empty if the operator is running with clusterScope. + WatchNamespaceEnvVar = "WATCH_NAMESPACE" +) diff --git a/internal/util/k8sutil/k8sutil.go b/internal/util/k8sutil/k8sutil.go new file mode 100644 index 0000000..00792f7 --- /dev/null +++ b/internal/util/k8sutil/k8sutil.go @@ -0,0 +1,166 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package k8sutil + +import ( + "bytes" + "errors" + "fmt" + "io" + "regexp" + "strings" + "unicode" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/yaml" +) + +// GetDisplayName turns a project dir name in any of {snake, chain, camel} +// cases, hierarchical dot structure, or space-delimited into a +// space-delimited, title'd display name. +// Ex. "another-_AppOperator_againTwiceThrice More" +// -> "Another App Operator Again Twice Thrice More" +func GetDisplayName(name string) string { + for _, sep := range ".-_ " { + splitName := strings.Split(name, string(sep)) + for i := 0; i < len(splitName); i++ { + if splitName[i] == "" { + splitName = append(splitName[:i], splitName[i+1:]...) + i-- + } else { + splitName[i] = strings.TrimSpace(splitName[i]) + } + } + name = strings.Join(splitName, " ") + } + splitName := strings.Split(name, " ") + for i, word := range splitName { + temp := word + o := 0 + for j, r := range word { + if unicode.IsUpper(r) { + if j > 0 && !unicode.IsUpper(rune(word[j-1])) { + index := j + o + temp = temp[0:index] + " " + temp[index:] + o++ + } + } + } + splitName[i] = temp + } + caser := cases.Title(language.AmericanEnglish, cases.NoLower) + return strings.TrimSpace(caser.String(strings.Join(splitName, " "))) +} + +// GetTypeMetaFromBytes gets the type and object metadata from b. b is assumed +// to be a single Kubernetes resource manifest. +func GetTypeMetaFromBytes(b []byte) (t metav1.TypeMeta, err error) { + u := unstructured.Unstructured{} + r := bytes.NewReader(b) + dec := yaml.NewYAMLOrJSONDecoder(r, 8) + // There is only one YAML doc if there are no more bytes to be read or EOF + // is hit. 
+ if err := dec.Decode(&u); err == nil && r.Len() != 0 { + return t, errors.New("error getting TypeMeta from bytes: more than one manifest in file") + } else if err != nil && err != io.EOF { + return t, fmt.Errorf("error getting TypeMeta from bytes: %v", err) + } + return metav1.TypeMeta{ + APIVersion: u.GetAPIVersion(), + Kind: u.GetKind(), + }, nil +} + +// dns1123LabelRegexp defines the character set allowed in a DNS 1123 label. +var dns1123LabelRegexp = regexp.MustCompile("[^a-zA-Z0-9]+") + +// FormatOperatorNameDNS1123 ensures name is DNS1123 label-compliant by +// replacing all non-compliant UTF-8 characters with "-". +func FormatOperatorNameDNS1123(name string) string { + if len(validation.IsDNS1123Label(name)) != 0 { + // Use - for any of the non-matching characters + n := dns1123LabelRegexp.ReplaceAllString(name, "-") + + // Now let's remove any leading or trailing - + return strings.ToLower(strings.Trim(n, "-")) + } + return name +} + +// TrimDNS1123Label trims a label to meet the DNS 1123 label length requirement +// by removing characters from the beginning of label such that len(label) <= 63. +func TrimDNS1123Label(label string) string { + if len(label) > validation.DNS1123LabelMaxLength { + return strings.Trim(label[len(label)-validation.DNS1123LabelMaxLength:], "-") + } + return label +} + +// SupportsOwnerReference checks whether a given dependent supports owner references, based on the owner. +// The namespace of the dependent resource can either be passed in explicitly, otherwise it will be +// extracted from the dependent runtime.Object. +// This function performs following checks: +// +// -- True: Owner is cluster-scoped. +// -- True: Both Owner and dependent are Namespaced with in same namespace. +// -- False: Owner is Namespaced and dependent is Cluster-scoped. +// -- False: Both Owner and dependent are Namespaced with different namespaces. 
+func SupportsOwnerReference(restMapper meta.RESTMapper, owner, dependent runtime.Object, depNamespace string) (bool, error) { + ownerGVK := owner.GetObjectKind().GroupVersionKind() + ownerMapping, err := restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version) + if err != nil { + return false, err + } + mOwner, err := meta.Accessor(owner) + if err != nil { + return false, err + } + + depGVK := dependent.GetObjectKind().GroupVersionKind() + depMapping, err := restMapper.RESTMapping(depGVK.GroupKind(), depGVK.Version) + if err != nil { + return false, err + } + mDep, err := meta.Accessor(dependent) + if err != nil { + return false, err + } + ownerClusterScoped := ownerMapping.Scope.Name() == meta.RESTScopeNameRoot + ownerNamespace := mOwner.GetNamespace() + depClusterScoped := depMapping.Scope.Name() == meta.RESTScopeNameRoot + if depNamespace == "" { + depNamespace = mDep.GetNamespace() + } + + if ownerClusterScoped { + return true, nil + } + + if depClusterScoped { + return false, nil + } + + if ownerNamespace != depNamespace { + return false, nil + } + // Both owner and dependent are namespace-scoped and in the same namespace. + return true, nil +} diff --git a/internal/util/k8sutil/k8sutil_test.go b/internal/util/k8sutil/k8sutil_test.go new file mode 100644 index 0000000..a5410aa --- /dev/null +++ b/internal/util/k8sutil/k8sutil_test.go @@ -0,0 +1,385 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package k8sutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestGetDisplayName(t *testing.T) { + cases := []struct { + input, wanted string + }{ + {"Appoperator", "Appoperator"}, + {"appoperator", "Appoperator"}, + {"appoperatoR", "Appoperato R"}, + {"AppOperator", "App Operator"}, + {"appOperator", "App Operator"}, + {"app-operator", "App Operator"}, + {"app-_operator", "App Operator"}, + {"App-operator", "App Operator"}, + {"app-_Operator", "App Operator"}, + {"app--Operator", "App Operator"}, + {"app--_Operator", "App Operator"}, + {"APP", "APP"}, + {"another-AppOperator_againTwiceThrice More", "Another App Operator Again Twice Thrice More"}, + } + + for _, c := range cases { + dn := GetDisplayName(c.input) + if dn != c.wanted { + t.Errorf("Wanted %s, got %s", c.wanted, dn) + } + } +} + +func TestSupportsOwnerReference(t *testing.T) { + type testcase struct { + name string + restMapper meta.RESTMapper + owner runtime.Object + dependent runtime.Object + result bool + depNamespace string + } + + var defaultVersion []schema.GroupVersion + restMapper := meta.NewDefaultRESTMapper(defaultVersion) + + GVK1 := schema.GroupVersionKind{ + Group: "apps", + Version: "v1alpha1", + Kind: "MyNamespaceKind", + } + GVK2 := schema.GroupVersionKind{ + Group: "rbac", + Version: "v1alpha1", + Kind: "MyClusterKind", + } + + restMapper.Add(GVK1, meta.RESTScopeNamespace) + restMapper.Add(GVK2, meta.RESTScopeRoot) + + cases := []testcase{ + { + name: "Returns false when owner is Namespaced and dependent resource is Clusterscoped.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns", + 
}, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyClusterKind", + "apiVersion": "rbac/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: false, + }, + { + name: "Returns true for owner and dependant are both ClusterScoped.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyClusterKind", + "apiVersion": "rbac/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyClusterKind", + "apiVersion": "rbac/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: true, + }, + { + name: "Returns true when owner and dependant are Namespaced with in same namespace.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: true, + }, + { + name: "Returns false when owner,and dependant are Namespaced, with different namespaces.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns1", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": 
"apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: false, + }, + { + name: "Returns false for invalid Owner Kind.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Dummy", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns1", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: false, + }, + { + name: "Returns false for invalid dependant Kind.", + restMapper: restMapper, + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns1", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Dummy", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + "namespace": "ns", + }, + }, + }, + result: false, + }, + { + name: "Returns true if depNamespace provided and matches.", + restMapper: restMapper, + depNamespace: "ns", + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + }, + }, + }, + result: true, + }, + { + name: "Returns false if depNamespace provided and doesn't match.", + restMapper: 
restMapper, + depNamespace: "ns1", + owner: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-controller", + "namespace": "ns", + }, + }, + }, + dependent: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MyNamespaceKind", + "apiVersion": "apps/v1alpha1", + "metadata": map[string]interface{}{ + "name": "example-nginx-role", + }, + }, + }, + result: false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + useOwner, err := SupportsOwnerReference(c.restMapper, c.owner, c.dependent, c.depNamespace) + if err != nil { + assert.Error(t, err) + } + assert.Equal(t, c.result, useOwner) + }) + } +} + +func TestTrimDNS1123Label(t *testing.T) { + type testcase struct { + name string + label string + expected string + } + testcases := []testcase{ + { + name: "return valid truncated values", + label: "quay-io-raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + expected: "raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + }, + { + name: "valid labels with proper length are noops", + label: "raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + expected: "raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + }, + { + name: "short invalid labels are left alone", + label: "-$*@*#fixed-invalid(__$)@+==-name-#$($", + expected: "-$*@*#fixed-invalid(__$)@+==-name-#$($", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + result := TrimDNS1123Label(tc.label) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestFormatOperatorNameDNS1123(t *testing.T) { + type testcase struct { + name string + label string + expected string + } + testcases := []testcase{ + { + name: "should not start with -", + label: "-doesnot-start-with-hyphen", + expected: "doesnot-start-with-hyphen", + }, + { + name: "should not start with 
non-alphanumeric", + label: "$@*#(@does-notstart-garbage", + expected: "does-notstart-garbage", + }, + { + name: "should not have non-alphanumeric", + label: "sample-1234$@*#(@does-notstart-garbage", + expected: "sample-1234-does-notstart-garbage", + }, + { + name: "should not end with non-alphanumeric", + label: "sample-1234-does-notstart-garbage#$*@#*($_!-_@(", + expected: "sample-1234-does-notstart-garbage", + }, + { + name: "should not start or end with hyphen", + label: "-does-not-start-or-end-with-hyphen---", + expected: "does-not-start-or-end-with-hyphen", + }, + { + name: "empty string is a noop", + label: "", + expected: "", + }, + { + name: "string of invalid characters results in empty string", + label: "@#@#)$*!!_$#*$*!@", + expected: "", + }, + { + name: "valid long names are not trimmed", + label: "quay-io-raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + expected: "quay-io-raffaelespazzoli-proactive-node-scaling-operator-bundle-latest", + }, + { + name: "should not contain capital letters", + label: "QUAY-IO-gobble-gobBLE", + expected: "quay-io-gobble-gobble", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + result := FormatOperatorNameDNS1123(tc.label) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/internal/util/k8sutil/object.go b/internal/util/k8sutil/object.go new file mode 100644 index 0000000..9f7d23c --- /dev/null +++ b/internal/util/k8sutil/object.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package k8sutil + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +type MarshalFunc func(interface{}) ([]byte, error) + +// GetObjectBytes marshalls an object with m and removes runtime-managed fields: +// 'status', 'creationTimestamp' +func GetObjectBytes(obj interface{}, m MarshalFunc) ([]byte, error) { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + deleteKeys := []string{"status", "creationTimestamp"} + for _, dk := range deleteKeys { + deleteKeyFromUnstructured(u, dk) + } + return m(u) +} + +func deleteKeyFromUnstructured(u map[string]interface{}, key string) { + if _, ok := u[key]; ok { + delete(u, key) + return + } + + for _, v := range u { + switch t := v.(type) { + case map[string]interface{}: + deleteKeyFromUnstructured(t, key) + case []interface{}: + for _, ti := range t { + if m, ok := ti.(map[string]interface{}); ok { + deleteKeyFromUnstructured(m, key) + } + } + } + } +} diff --git a/internal/util/k8sutil/scan.go b/internal/util/k8sutil/scan.go new file mode 100644 index 0000000..14e0790 --- /dev/null +++ b/internal/util/k8sutil/scan.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package k8sutil
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	k8syaml "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+const maxSuccessiveEmpties = 100 // Abort after this many blank YAML docs in a row (reader not progressing).
+
+// Scanner scans a yaml manifest file for manifest tokens delimited by "---".
+// See bufio.Scanner for semantics.
+type Scanner struct {
+	reader  *k8syaml.YAMLReader
+	token   []byte // Last token returned by split.
+	err     error  // Sticky error.
+	empties int    // Count of successive empty tokens.
+	done    bool   // Scan has finished.
+}
+
+func NewYAMLScanner(r io.Reader) *Scanner {
+	return &Scanner{reader: k8syaml.NewYAMLReader(bufio.NewReader(r))}
+}
+
+func (s *Scanner) Err() error {
+	if s.err == io.EOF {
+		return nil
+	}
+	return s.err
+}
+
+func (s *Scanner) Scan() bool {
+	if s.done {
+		return false
+	}
+
+	var (
+		tok []byte
+		err error
+	)
+
+	for {
+		tok, err = s.reader.Read()
+		if err != nil {
+			if err == io.EOF {
+				s.done = true
+			}
+			s.err = err
+			return false
+		}
+		if len(bytes.TrimSpace(tok)) == 0 {
+			s.empties++
+			if s.empties > maxSuccessiveEmpties {
+				panic("yaml.Scan: too many empty tokens without progressing")
+			}
+			continue
+		}
+		s.empties = 0
+		s.token = tok
+		return true
+	}
+}
+
+func (s *Scanner) Text() string {
+	return string(s.token)
+}
+
+func (s *Scanner) Bytes() []byte {
+	return s.token
+}
diff --git a/internal/util/projutil/interactive_promt_util.go b/internal/util/projutil/interactive_promt_util.go
new file mode 100644
index 0000000..780eb05
--- /dev/null
+++ b/internal/util/projutil/interactive_promt_util.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package projutil + +import ( + "bufio" + "fmt" + "io" + "log" + "os" + "strings" +) + +// InteractiveLevel captures the user preference on the generation of interactive +// commands. +type InteractiveLevel int + +const ( + // User has not turned interactive mode on or off, default to off. + InteractiveSoftOff InteractiveLevel = iota + // User has explicitly turned interactive mode off. + InteractiveHardOff + // User only explicitly turned interactive mode on. + InteractiveOnAll +) + +func printMessage(msg string, isOptional bool) { + fmt.Println() + if isOptional { + fmt.Print(strings.TrimSpace(msg) + " (optional): " + "\n" + "> ") + } else { + fmt.Print(strings.TrimSpace(msg) + " (required): " + "\n" + "> ") + } +} + +func GetRequiredInput(msg string) string { + return getRequiredInput(os.Stdin, msg) +} + +func getRequiredInput(rd io.Reader, msg string) string { + reader := bufio.NewReader(rd) + + for { + printMessage(msg, false) + value := readInput(reader) + if value != "" { + return value + } + fmt.Printf("Input is required. ") + } +} + +func GetOptionalInput(msg string) string { + printMessage(msg, true) + value := readInput(bufio.NewReader(os.Stdin)) + return value +} + +func GetStringArray(msg string) []string { + return getStringArray(os.Stdin, msg) +} + +func getStringArray(rd io.Reader, msg string) []string { + reader := bufio.NewReader(rd) + for { + printMessage(msg, false) + value := readArray(reader) + if len(value) != 0 && len(value[0]) != 0 { + return value + } + fmt.Printf("No list provided. 
") + } +} + +// readstdin reads a line from stdin and returns the value. +func readLine(reader *bufio.Reader) string { + text, err := reader.ReadString('\n') + if err != nil { + log.Fatalf("Error when reading input: %v", err) + } + return strings.Trim(strings.TrimSpace(text), "`'\"") +} + +func readInput(reader *bufio.Reader) string { + for { + text := readLine(reader) + return text + } +} + +// readArray parses the line from stdin, returns an array +// of words. +func readArray(reader *bufio.Reader) []string { + arr := make([]string, 0) + text := readLine(reader) + + for _, words := range strings.Split(text, ",") { + arr = append(arr, strings.TrimSpace(words)) + } + return arr +} diff --git a/internal/util/projutil/interactive_promt_util_test.go b/internal/util/projutil/interactive_promt_util_test.go new file mode 100644 index 0000000..e5ff36f --- /dev/null +++ b/internal/util/projutil/interactive_promt_util_test.go @@ -0,0 +1,103 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package projutil + +import ( + "bytes" + "reflect" + "testing" +) + +func TestUserInput(t *testing.T) { + type args struct { + msg string + content []byte + } + + tests := []struct { + name string + args args + want string + }{ + { + name: "test when user provides input to the command", + args: args{ + msg: "Enter a word: ", + content: []byte("Memcached Operator\n"), + }, + want: "Memcached Operator", + }, + { + name: "test when user does not provide input and prompt appears again", + args: args{ + msg: "Enter a word: ", + content: []byte("\nMemcached Operator\n"), + }, + want: "Memcached Operator", + }, + { + name: "test when user provides quoted input to the command", + args: args{ + msg: "Enter a word: ", + content: []byte("'Memcached Operator'\n"), + }, + want: "Memcached Operator", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getRequiredInput(bytes.NewBuffer(tt.args.content), tt.args.msg); got != tt.want { + t.Errorf("GetRequiredInput() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestUserInputStringArray(t *testing.T) { + type args struct { + msg string + content []byte + } + + tests := []struct { + name string + args args + want []string + }{ + { + name: "test when user provides input to the command", + args: args{ + msg: "Enter list of words", + content: []byte("app, memcached-operator \n"), + }, + want: []string{"app", "memcached-operator"}, + }, + { + name: "test when user does not provide input and prompt appears again", + args: args{ + msg: "Enter list of words", + content: []byte("\noperator, app\n"), + }, + want: []string{"operator", "app"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getStringArray(bytes.NewBuffer(tt.args.content), tt.args.msg); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetRequiredInput() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/util/projutil/project_util.go b/internal/util/projutil/project_util.go new 
file mode 100644 index 0000000..149ec1e --- /dev/null +++ b/internal/util/projutil/project_util.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package projutil + +import ( + "fmt" + "os" + "regexp" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/afero" + "sigs.k8s.io/kubebuilder/v3/pkg/config" + yamlstore "sigs.k8s.io/kubebuilder/v3/pkg/config/store/yaml" + _ "sigs.k8s.io/kubebuilder/v3/pkg/config/v2" // Register config/v2 for `config.New` + _ "sigs.k8s.io/kubebuilder/v3/pkg/config/v3" // Register config/v3 for `config.New` + "sigs.k8s.io/kubebuilder/v3/pkg/machinery" +) + +const ( + // Useful file modes. + DirMode = 0755 + FileMode = 0644 + ExecFileMode = 0755 +) + +const ( + // Go env vars. + GoFlagsEnv = "GOFLAGS" +) + +// Default config file path. +const configFile = "PROJECT" + +// OperatorType - the type of operator +type OperatorType = string + +const ( + // OperatorTypeGo - golang type of operator. + OperatorTypeGo OperatorType = "go" + // OperatorTypeAnsible - ansible type of operator. + OperatorTypeAnsible OperatorType = "ansible" + // OperatorTypeHelm - helm type of operator. + OperatorTypeHelm OperatorType = "helm" + // OperatorTypeHybrid - hybrid type of operator. + operatorTypeHybridHelm OperatorType = "hybridHelm" + // OperatorTypeUnknown - unknown type of operator. 
+ OperatorTypeUnknown OperatorType = "unknown" +) + +type ErrUnknownOperatorType struct { + Type string +} + +func (e ErrUnknownOperatorType) Error() string { + if e.Type == "" { + return "unknown operator type" + } + return fmt.Sprintf(`unknown operator type "%v"`, e.Type) +} + +// HasProjectFile returns true if the project is configured as a kubebuilder +// project. +func HasProjectFile() bool { + _, err := os.Stat(configFile) + if err != nil { + if os.IsNotExist(err) { + return false + } + log.Fatalf("Failed to read PROJECT file to detect kubebuilder project: %v", err) + } + return true +} + +// ReadConfig returns a configuration if a file containing one exists at the +// default path (project root). +func ReadConfig() (config.Config, error) { + store := yamlstore.New(machinery.Filesystem{FS: afero.NewOsFs()}) + if err := store.Load(); err != nil { + return nil, err + } + + return store.Config(), nil +} + +// PluginChainToOperatorType converts a plugin chain to an operator project type. +// TODO(estroz): this can probably be made more robust by checking known plugin keys directly. +func PluginChainToOperatorType(pluginKeys []string) OperatorType { + for _, pluginKey := range pluginKeys { + switch { + case strings.HasPrefix(pluginKey, "go"): + return OperatorTypeGo + case strings.HasPrefix(pluginKey, "helm"): + return OperatorTypeHelm + case strings.HasPrefix(pluginKey, "ansible"): + return OperatorTypeAnsible + case strings.HasPrefix(pluginKey, "hybrid"): + return operatorTypeHybridHelm + } + } + return OperatorTypeUnknown +} + +// GetProjectLayout returns the `layout` field as a comma separated list. +func GetProjectLayout(cfg config.Config) string { + return strings.Join(cfg.GetPluginChain(), ",") +} + +var flagRe = regexp.MustCompile("(.* )?-v(.* )?") + +// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not +// already contain "-v" to make "go" command output verbose. 
+func SetGoVerbose() error { + gf, ok := os.LookupEnv(GoFlagsEnv) + if !ok || len(gf) == 0 { + return os.Setenv(GoFlagsEnv, "-v") + } + if !flagRe.MatchString(gf) { + return os.Setenv(GoFlagsEnv, gf+" -v") + } + return nil +} + +// RewriteFileContents adds newContent to the line after the last occurrence of target in filename's contents, +// then writes the updated contents back to disk. +func RewriteFileContents(filename, target, newContent string) error { + text, err := os.ReadFile(filename) + if err != nil { + return fmt.Errorf("error in getting contents from the file, %v", err) + } + + modifiedContent, err := appendContent(string(text), target, newContent) + if err != nil { + return err + } + + err = os.WriteFile(filename, []byte(modifiedContent), FileMode) + if err != nil { + return fmt.Errorf("error writing modified contents to file, %v", err) + } + + return nil +} + +func appendContent(fileContents, target, newContent string) (string, error) { + labelIndex := strings.LastIndex(fileContents, target) + if labelIndex == -1 { + return "", fmt.Errorf("no prior string %s in newContent", target) + } + + separationIndex := strings.Index(fileContents[labelIndex:], "\n") + if separationIndex == -1 { + return "", fmt.Errorf("no new line at the end of string %s", fileContents[labelIndex:]) + } + + index := labelIndex + separationIndex + 1 + return fileContents[:index] + newContent + fileContents[index:], nil +} diff --git a/internal/util/projutil/projutil_test.go b/internal/util/projutil/projutil_test.go new file mode 100644 index 0000000..7a9c861 --- /dev/null +++ b/internal/util/projutil/projutil_test.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package projutil + +import ( + "errors" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Testing projutil helpers", func() { + Describe("Testing RewriteFileContents", func() { + var ( + fileContents string + instruction string + content string + expectedOutput string + ) + It("Should pass when file has instruction", func() { + fileContents = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ \n" + + "COPY deploy/olm-catalog/memcached-operator/manifests /manifests/ \n" + + instruction = "LABEL" + + content = "LABEL operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + expectedOutput = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ \n" + + "LABEL operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + "COPY deploy/olm-catalog/memcached-operator/manifests /manifests/ \n" + + Expect(appendContent(fileContents, instruction, content)).To(Equal(expectedOutput)) + }) + + It("Should result in error when file does not have instruction", func() { + fileContents = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL 
operators.operatorframework.io.bundle.metadata.v1=metadata/ \n" + + "COPY deploy/olm-catalog/memcached-operator/manifests /manifests/ \n" + + instruction = "ADD" + + content = "ADD operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + expectedOutput = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ \n" + + "LABEL operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + "COPY deploy/olm-catalog/memcached-operator/manifests /manifests/ \n" + + _, err := appendContent(fileContents, instruction, content) + + Expect(err).Should(MatchError(errors.New("no prior string ADD in newContent"))) + }) + + It("Should result in error as no new line at the end of dockerfile command", func() { + fileContents = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/" + + instruction = "LABEL" + + content = "LABEL operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + expectedOutput = "LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 \n" + + "LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ \n" + + "LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ \n" + + "LABEL operators.operatorframework.io.bundle.tests.v1=tests/ \n" + + _, err := appendContent(fileContents, instruction, content) + + Expect(err).ShouldNot((BeNil())) + }) + + }) +}) + +func TestMetadata(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Projutil Helpers suite") +} diff --git a/internal/version/version.go b/internal/version/version.go new file mode 100644 index 0000000..71ccfee --- /dev/null +++ b/internal/version/version.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Operator-SDK Authors +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +// var needs to be used instead of const for ldflags +var ( + Version = "unknown" + GitVersion = "unknown" + GitCommit = "unknown" + KubernetesVersion = "unknown" + + // ImageVersion represents the ansible-operator, helm-operator, and scorecard subproject versions, + // which is used in each plugin to specify binary and/or image versions. This is set to the + // most recent operator-sdk release tag such that samples are generated with the correct versions + // in a release commit. Once each element that uses this version is moved to a separate repo + // and release process, this variable will be removed. + ImageVersion = "unknown" +) diff --git a/test/common/sa_secret.go b/test/common/sa_secret.go new file mode 100644 index 0000000..c33842c --- /dev/null +++ b/test/common/sa_secret.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package common + +import ( + "fmt" + "os" +) + +var saSecretTemplate = `--- +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: + name: %s + annotations: + kubernetes.io/service-account.name: "%s" +` + +// GetSASecret writes a service account token secret to a file. It returns a string to the file or an error if it fails to write the file +func GetSASecret(name string, dir string) (string, error) { + secretName := name + "-secret" + fileName := dir + "/" + secretName + ".yaml" + err := os.WriteFile(fileName, []byte(fmt.Sprintf(saSecretTemplate, secretName, name)), 0777) + if err != nil { + return "", err + } + + return fileName, nil +} diff --git a/test/common/scorecard.go b/test/common/scorecard.go new file mode 100644 index 0000000..5f4ffcb --- /dev/null +++ b/test/common/scorecard.go @@ -0,0 +1,122 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "encoding/json" + "fmt" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" + "github.com/operator-framework/api/pkg/apis/scorecard/v1alpha3" +) + +// ScorecardSpec runs a set of scorecard tests common to all operator types. 
+func ScorecardSpec(tc *testutils.TestContext, operatorType string) func() { + return func() { + var ( + err error + cmd *exec.Cmd + outputBytes []byte + output v1alpha3.TestList + ) + + It("should run a single scorecard test successfully", func() { + cmd = exec.Command(tc.BinaryName, "scorecard", "bundle", + "--selector", "suite=basic", + "--output", "json", + "--wait-time", "2m") + outputBytes, err = tc.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(json.Unmarshal(outputBytes, &output)).To(Succeed()) + + Expect(output.Items).To(HaveLen(1)) + results := output.Items[0].Status.Results + Expect(results).To(HaveLen(1)) + Expect(results[0].Name).To(Equal("basic-check-spec")) + Expect(results[0].State).To(Equal(v1alpha3.PassState)) + }) + + It("should run all enabled scorecard tests successfully", func() { + cmd = exec.Command(tc.BinaryName, "scorecard", "bundle", + "--output", "json", + "--wait-time", "4m") + outputBytes, err = tc.Run(cmd) + // Some tests are expected to fail, which results in scorecard exiting 1. + // Go tests no longer expect to fail + if strings.ToLower(operatorType) != "go" { + Expect(err).To(HaveOccurred()) + } + Expect(json.Unmarshal(outputBytes, &output)).To(Succeed()) + + expected := map[string]v1alpha3.State{ + // Basic suite. + "basic-check-spec": v1alpha3.PassState, + // OLM suite. + "olm-bundle-validation": v1alpha3.PassState, + "olm-crds-have-validation": v1alpha3.FailState, + "olm-crds-have-resources": v1alpha3.FailState, + "olm-spec-descriptors": v1alpha3.FailState, + // For Ansible/Helm should PASS with a Suggestion + // For Golang should pass because we have status spec and descriptions + "olm-status-descriptors": v1alpha3.PassState, + } + if strings.ToLower(operatorType) == "go" { + // Go projects have generated CRD validation. 
+ expected["olm-crds-have-validation"] = v1alpha3.PassState + // Go generated test operator now has CSV markers + // that allows these validations to pass + expected["olm-crds-have-resources"] = v1alpha3.PassState + expected["olm-spec-descriptors"] = v1alpha3.PassState + expected["olm-status-descriptors"] = v1alpha3.PassState + // The Go sample project tests a custom suite. + expected["customtest1"] = v1alpha3.PassState + expected["customtest2"] = v1alpha3.PassState + } + + Expect(output.Items).To(HaveLen(len(expected))) + for i := 0; i < len(output.Items); i++ { + results := output.Items[i].Status.Results + Expect(results).To(HaveLen(1)) + Expect(results[0].Name).NotTo(BeEmpty()) + fmt.Fprintln(GinkgoWriter, " - Name: ", results[0].Name) + fmt.Fprintln(GinkgoWriter, " Expected: ", expected[results[0].Name]) + fmt.Fprintln(GinkgoWriter, " Output: ", results[0].State) + Expect(results[0].State).To(Equal(expected[results[0].Name])) + } + }) + + It("should configure scorecard storage successfully", func() { + cmd = exec.Command(tc.BinaryName, "scorecard", "bundle", + "--selector", "suite=basic", + "--output", "json", + "--test-output", "/testdata", + "--wait-time", "4m") + outputBytes, err = tc.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(json.Unmarshal(outputBytes, &output)).To(Succeed()) + + Expect(output.Items).To(HaveLen(1)) + results := output.Items[0].Status.Results + Expect(results).To(HaveLen(1)) + Expect(results[0].Name).To(Equal("basic-check-spec")) + Expect(results[0].State).To(Equal(v1alpha3.PassState)) + }) + } +} diff --git a/test/e2e/ansible/cluster_test.go b/test/e2e/ansible/cluster_test.go new file mode 100644 index 0000000..0eed685 --- /dev/null +++ b/test/e2e/ansible/cluster_test.go @@ -0,0 +1,394 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_ansible_test + +import ( + "encoding/base64" + "fmt" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + kbtutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" + "github.com/operator-framework/ansible-operator-plugins/test/common" +) + +var _ = Describe("Running ansible projects", func() { + + var ( + controllerPodName, memcachedDeploymentName, metricsClusterRoleBindingName string + fooSampleFile, memfinSampleFile, memcachedSampleFile string + ) + + Context("built with operator-sdk", func() { + BeforeEach(func() { + metricsClusterRoleBindingName = fmt.Sprintf("%s-metrics-reader", tc.ProjectName) + samplesDir := filepath.Join(tc.Dir, "config", "samples") + fooSampleFile = filepath.Join(samplesDir, fmt.Sprintf("%s_%s_foo.yaml", tc.Group, tc.Version)) + memfinSampleFile = filepath.Join(samplesDir, fmt.Sprintf("%s_%s_memfin.yaml", tc.Group, tc.Version)) + memcachedSampleFile = filepath.Join(samplesDir, + fmt.Sprintf("%s_%s_%s.yaml", tc.Group, tc.Version, strings.ToLower(tc.Kind))) + + By("deploying project on the cluster") + Expect(tc.Make("deploy", "IMG="+tc.ImageName)).To(Succeed()) + }) + + AfterEach(func() { + By("deleting curl pod") + testutils.WrapWarnOutput(tc.Kubectl.Delete(false, "pod", "curl")) + + By("deleting test CR instances") + for _, sample := range []string{memcachedSampleFile, fooSampleFile, memfinSampleFile} { + testutils.WrapWarnOutput(tc.Kubectl.Delete(false, "-f", sample)) + } + + 
By("cleaning up permissions") + testutils.WrapWarnOutput(tc.Kubectl.Command("delete", "clusterrolebinding", metricsClusterRoleBindingName)) + + By("undeploy project") + testutils.WrapWarn(tc.Make("undeploy")) + + By("ensuring that the namespace was deleted") + testutils.WrapWarnOutput(tc.Kubectl.Wait(false, "namespace", "foo", "--for", "delete", "--timeout", "2m")) + }) + + It("should run correctly in a cluster", func() { + By("checking if the Operator project Pod is running") + verifyControllerUp := func() error { + // Get the controller-manager pod name + podOutput, err := tc.Kubectl.Get( + true, + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}") + if err != nil { + return fmt.Errorf("could not get pods: %v", err) + } + podNames := kbtutil.GetNonEmptyLines(podOutput) + if len(podNames) != 1 { + return fmt.Errorf("expecting 1 pod, have %d", len(podNames)) + } + controllerPodName = podNames[0] + if !strings.Contains(controllerPodName, "controller-manager") { + return fmt.Errorf("expecting pod name %q to contain %q", controllerPodName, "controller-manager") + } + + // Ensure the controller-manager Pod is running. 
+ status, err := tc.Kubectl.Get( + true, + "pods", controllerPodName, "-o", "jsonpath={.status.phase}") + if err != nil { + return fmt.Errorf("failed to get pod status for %q: %v", controllerPodName, err) + } + if status != "Running" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + Eventually(verifyControllerUp, 2*time.Minute, time.Second).Should(Succeed()) + + By("ensuring the created ServiceMonitor for the manager") + _, err := tc.Kubectl.Get( + true, + "ServiceMonitor", + fmt.Sprintf("%s-controller-manager-metrics-monitor", tc.ProjectName)) + Expect(err).NotTo(HaveOccurred()) + + By("ensuring the created metrics Service for the manager") + _, err = tc.Kubectl.Get( + true, + "Service", + fmt.Sprintf("%s-controller-manager-metrics-service", tc.ProjectName)) + Expect(err).NotTo(HaveOccurred()) + + By("create custom resource (Memcached CR)") + _, err = tc.Kubectl.Apply(false, "-f", memcachedSampleFile) + Expect(err).NotTo(HaveOccurred()) + + By("create custom resource (Foo CR)") + _, err = tc.Kubectl.Apply(false, "-f", fooSampleFile) + Expect(err).NotTo(HaveOccurred()) + + By("create custom resource (Memfin CR)") + _, err = tc.Kubectl.Apply(false, "-f", memfinSampleFile) + Expect(err).NotTo(HaveOccurred()) + + By("ensuring the CR gets reconciled") + managerContainerLogs := func() string { + logOutput, err := tc.Kubectl.Logs(controllerPodName, "-c", "manager") + Expect(err).NotTo(HaveOccurred()) + return logOutput + } + Eventually(managerContainerLogs, time.Minute, time.Second).Should(ContainSubstring( + "Ansible-runner exited successfully")) + Eventually(managerContainerLogs, time.Minute, time.Second).ShouldNot(ContainSubstring("failed=1")) + Eventually(managerContainerLogs, time.Minute, time.Second).ShouldNot(ContainSubstring("[Gathering Facts]")) + + By("ensuring no liveness probe fail events") + verifyControllerProbe := func() string { + By("getting the controller-manager events") + eventsOutput, err := tc.Kubectl.Get( + true, + 
"events", "--field-selector", fmt.Sprintf("involvedObject.name=%s", controllerPodName)) + Expect(err).NotTo(HaveOccurred()) + return eventsOutput + } + Eventually(verifyControllerProbe, time.Minute, time.Second).ShouldNot(ContainSubstring("Killing")) + + By("getting memcached deploy by labels") + getMemcachedDeploymentName := func() string { + memcachedDeploymentName, err = tc.Kubectl.Get( + false, "deployment", + "-l", "app=memcached", "-o", "jsonpath={..metadata.name}") + Expect(err).NotTo(HaveOccurred()) + return memcachedDeploymentName + } + Eventually(getMemcachedDeploymentName, 2*time.Minute, time.Second).ShouldNot(BeEmpty()) + + By("checking the Memcached CR deployment status") + verifyCRUp := func() string { + output, err := tc.Kubectl.Command( + "rollout", "status", "deployment", memcachedDeploymentName) + Expect(err).NotTo(HaveOccurred()) + return output + } + Eventually(verifyCRUp, time.Minute, time.Second).Should(ContainSubstring("successfully rolled out")) + + By("ensuring the created Service for the Memcached CR") + crServiceName, err := tc.Kubectl.Get( + false, + "Service", "-l", "app=memcached") + Expect(err).NotTo(HaveOccurred()) + Expect(len(crServiceName)).NotTo(BeIdenticalTo(0)) + + By("Verifying that a config map owned by the CR has been created") + verifyConfigMap := func() error { + _, err = tc.Kubectl.Get( + false, + "configmap", "test-blacklist-watches") + return err + } + Eventually(verifyConfigMap, time.Minute*2, time.Second).Should(Succeed()) + + By("Ensuring that config map requests skip the cache.") + checkSkipCache := func() string { + logOutput, err := tc.Kubectl.Logs(controllerPodName, "-c", "manager") + Expect(err).NotTo(HaveOccurred()) + return logOutput + } + Eventually(checkSkipCache, time.Minute, time.Second).Should(ContainSubstring("\"Skipping cache lookup" + + "\",\"resource\":{\"IsResourceRequest\":true," + + "\"Path\":\"/api/v1/namespaces/default/configmaps/test-blacklist-watches\"")) + + By("scaling deployment replicas to 
2") + _, err = tc.Kubectl.Command( + "scale", "deployment", memcachedDeploymentName, "--replicas", "2") + Expect(err).NotTo(HaveOccurred()) + + By("verifying the deployment automatically scales back down to 1") + verifyMemcachedScalesBack := func() error { + replicas, err := tc.Kubectl.Get( + false, + "deployment", memcachedDeploymentName, "-o", "jsonpath={..spec.replicas}") + Expect(err).NotTo(HaveOccurred()) + if replicas != "1" { + return fmt.Errorf("memcached(CR) deployment with %s replicas", replicas) + } + return nil + } + Eventually(verifyMemcachedScalesBack, time.Minute, time.Second).Should(Succeed()) + + By("updating size to 2 in the CR manifest") + err = kbtutil.ReplaceInFile(memcachedSampleFile, "size: 1", "size: 2") + Expect(err).NotTo(HaveOccurred()) + + By("applying CR manifest with size: 2") + _, err = tc.Kubectl.Apply(false, "-f", memcachedSampleFile) + Expect(err).NotTo(HaveOccurred()) + + By("ensuring the CR gets reconciled after patching it") + managerContainerLogsAfterUpdateCR := func() string { + logOutput, err := tc.Kubectl.Logs(controllerPodName, "-c", "manager") + Expect(err).NotTo(HaveOccurred()) + return logOutput + } + Eventually(managerContainerLogsAfterUpdateCR, time.Minute, time.Second).Should( + ContainSubstring("Ansible-runner exited successfully")) + Eventually(managerContainerLogs, time.Minute, time.Second).ShouldNot(ContainSubstring("failed=1")) + + By("checking Deployment replicas spec is equals 2") + verifyMemcachedPatch := func() error { + replicas, err := tc.Kubectl.Get( + false, + "deployment", memcachedDeploymentName, "-o", "jsonpath={..spec.replicas}") + Expect(err).NotTo(HaveOccurred()) + if replicas != "2" { + return fmt.Errorf("memcached(CR) deployment with %s replicas", replicas) + } + return nil + } + Eventually(verifyMemcachedPatch, time.Minute, time.Second).Should(Succeed()) + + // As of Kubernetes 1.24 a ServiceAccount no longer has a ServiceAccount token secret autogenerated. 
We have to create it manually here + By("Creating the ServiceAccount token") + secretFile, err := common.GetSASecret(tc.Kubectl.ServiceAccount, tc.Dir) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + _, err = tc.Kubectl.Apply(true, "-f", secretFile) + return err + }, time.Minute, time.Second).Should(Succeed()) + By("annotating the CR") + _, err = tc.Kubectl.Command( + "annotate", "foo", "foo-sample", "test-annotation='step2'") + Expect(err).NotTo(HaveOccurred()) + + Eventually(managerContainerLogs, time.Minute, time.Second).Should(ContainSubstring( + "Ansible-runner exited successfully")) + Eventually(managerContainerLogs, time.Minute, time.Second).Should(ContainSubstring( + "test-annotation found : 'step2'")) + Eventually(managerContainerLogs, time.Minute, time.Second).ShouldNot(ContainSubstring("failed=1")) + Eventually(managerContainerLogs, time.Minute, time.Second).ShouldNot(ContainSubstring("[Gathering Facts]")) + + By("granting permissions to access the metrics and read the token") + _, err = tc.Kubectl.Command("create", "clusterrolebinding", metricsClusterRoleBindingName, + fmt.Sprintf("--clusterrole=%s-metrics-reader", tc.ProjectName), + fmt.Sprintf("--serviceaccount=%s:%s", tc.Kubectl.Namespace, tc.Kubectl.ServiceAccount)) + Expect(err).NotTo(HaveOccurred()) + + By("reading the metrics token") + // Filter token query by service account in case more than one exists in a namespace. 
+ query := fmt.Sprintf(`{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name=="%s")].data.token}`, + tc.Kubectl.ServiceAccount, + ) + b64Token, err := tc.Kubectl.Get(true, "secrets", "-o=jsonpath="+query) + Expect(err).NotTo(HaveOccurred()) + token, err := base64.StdEncoding.DecodeString(strings.TrimSpace(b64Token)) + Expect(err).NotTo(HaveOccurred()) + Expect(token).ToNot(BeEmpty()) + + By("creating a curl pod") + cmdOpts := []string{ + "run", "curl", "--image=curlimages/curl:7.68.0", "--restart=OnFailure", "--", + "curl", "-v", "-k", "-H", fmt.Sprintf(`Authorization: Bearer %s`, token), + fmt.Sprintf("https://%s-controller-manager-metrics-service.%s.svc:8443/metrics", tc.ProjectName, tc.Kubectl.Namespace), + } + _, err = tc.Kubectl.CommandInNamespace(cmdOpts...) + Expect(err).NotTo(HaveOccurred()) + + By("validating that the curl pod is running as expected") + verifyCurlUp := func() error { + // Validate pod status + status, err := tc.Kubectl.Get( + true, + "pods", "curl", "-o", "jsonpath={.status.phase}") + if err != nil { + return err + } + if status != "Completed" && status != "Succeeded" { + return fmt.Errorf("curl pod in %s status", status) + } + return nil + } + Eventually(verifyCurlUp, 2*time.Minute, time.Second).Should(Succeed()) + + By("checking metrics endpoint serving as expected") + getCurlLogs := func() string { + logOutput, err := tc.Kubectl.Logs("curl") + Expect(err).NotTo(HaveOccurred()) + return logOutput + } + Eventually(getCurlLogs, time.Minute, time.Second).Should(ContainSubstring("< HTTP/2 200")) + + By("getting the CR namespace token") + crNamespace, err := tc.Kubectl.Get( + false, + tc.Kind, + fmt.Sprintf("%s-sample", strings.ToLower(tc.Kind)), + "-o=jsonpath={..metadata.namespace}") + Expect(err).NotTo(HaveOccurred()) + Expect(crNamespace).NotTo(BeEmpty()) + + By("ensuring the operator metrics contains a `resource_created_at` metric for the Memcached CR") + metricExportedMemcachedCR := 
fmt.Sprintf("resource_created_at_seconds{group=\"%s\","+ + "kind=\"%s\","+ + "name=\"%s-sample\","+ + "namespace=\"%s\","+ + "version=\"%s\"}", + fmt.Sprintf("%s.%s", tc.Group, tc.Domain), + tc.Kind, + strings.ToLower(tc.Kind), + crNamespace, + tc.Version) + Eventually(getCurlLogs, time.Minute, time.Second).Should(ContainSubstring(metricExportedMemcachedCR)) + + By("ensuring the operator metrics contains a `resource_created_at` metric for the Foo CR") + metricExportedFooCR := fmt.Sprintf("resource_created_at_seconds{group=\"%s\","+ + "kind=\"%s\","+ + "name=\"%s-sample\","+ + "namespace=\"%s\","+ + "version=\"%s\"}", + fmt.Sprintf("%s.%s", tc.Group, tc.Domain), + "Foo", + strings.ToLower("Foo"), + crNamespace, + tc.Version) + Eventually(getCurlLogs, time.Minute, time.Second).Should(ContainSubstring(metricExportedFooCR)) + + By("ensuring the operator metrics contains a `resource_created_at` metric for the Memfin CR") + metricExportedMemfinCR := fmt.Sprintf("resource_created_at_seconds{group=\"%s\","+ + "kind=\"%s\","+ + "name=\"%s-sample\","+ + "namespace=\"%s\","+ + "version=\"%s\"}", + fmt.Sprintf("%s.%s", tc.Group, tc.Domain), + "Memfin", + strings.ToLower("Memfin"), + crNamespace, + tc.Version) + Eventually(getCurlLogs, time.Minute, time.Second).Should(ContainSubstring(metricExportedMemfinCR)) + + By("creating a configmap that the finalizer should remove") + _, err = tc.Kubectl.Command("create", "configmap", "deleteme") + Expect(err).NotTo(HaveOccurred()) + + By("deleting Memcached CR manifest") + _, err = tc.Kubectl.Delete(false, "-f", memcachedSampleFile) + Expect(err).NotTo(HaveOccurred()) + + By("ensuring the CR gets reconciled successfully") + managerContainerLogsAfterDeleteCR := func() string { + logOutput, err := tc.Kubectl.Logs(controllerPodName, "-c", "manager") + Expect(err).NotTo(HaveOccurred()) + return logOutput + } + Eventually(managerContainerLogsAfterDeleteCR, time.Minute, time.Second).Should(ContainSubstring( + "Ansible-runner exited 
successfully")) + Eventually(managerContainerLogsAfterDeleteCR).ShouldNot(ContainSubstring("error")) + + By("ensuring that Memchaced Deployment was removed") + getMemcachedDeployment := func() error { + _, err := tc.Kubectl.Get( + false, "deployment", + memcachedDeploymentName) + return err + } + Eventually(getMemcachedDeployment, time.Minute*2, time.Second).ShouldNot(Succeed()) + }) + }) +}) diff --git a/test/e2e/ansible/local_test.go b/test/e2e/ansible/local_test.go new file mode 100644 index 0000000..5a04e9b --- /dev/null +++ b/test/e2e/ansible/local_test.go @@ -0,0 +1,50 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_ansible_test + +import ( + "os/exec" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Running ansible projects", func() { + Context("built with operator-sdk", func() { + + BeforeEach(func() { + By("installing CRD's") + err := tc.Make("install") + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + By("uninstalling CRD's") + err := tc.Make("uninstall") + Expect(err).NotTo(HaveOccurred()) + }) + + It("should run correctly locally", func() { + By("running the project") + cmd := exec.Command("make", "run") + err := cmd.Start() + Expect(err).NotTo(HaveOccurred()) + + By("killing the project") + err = cmd.Process.Kill() + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/test/e2e/ansible/olm_test.go b/test/e2e/ansible/olm_test.go new file mode 100644 index 0000000..d1fcec8 --- /dev/null +++ b/test/e2e/ansible/olm_test.go @@ -0,0 +1,58 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_ansible_test + +import ( + "os/exec" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/operator-framework/ansible-operator-plugins/internal/util/projutil" +) + +var _ = Describe("Integrating ansible Projects with OLM", func() { + Context("with operator-sdk", func() { + const operatorVersion = "0.0.1" + + It("should generate and run a valid OLM bundle and packagemanifests", func() { + By("building the operator bundle image") + err := tc.Make("bundle-build", "BUNDLE_IMG="+tc.BundleImageName) + Expect(err).NotTo(HaveOccurred()) + + By("adding the 'packagemanifests' rule to the Makefile") + err = tc.AddPackagemanifestsTarget(projutil.OperatorTypeAnsible) + Expect(err).NotTo(HaveOccurred()) + + By("generating the operator package manifests") + err = tc.Make("packagemanifests", "IMG="+tc.ImageName) + Expect(err).NotTo(HaveOccurred()) + + By("running the package") + runPkgManCmd := exec.Command(tc.BinaryName, "run", "packagemanifests", + "--install-mode", "AllNamespaces", + "--version", operatorVersion, + "--timeout", "4m") + _, err = tc.Run(runPkgManCmd) + Expect(err).NotTo(HaveOccurred()) + + By("destroying the deployed package manifests-formatted operator") + cleanupPkgManCmd := exec.Command(tc.BinaryName, "cleanup", tc.ProjectName, + "--timeout", "4m") + _, err = tc.Run(cleanupPkgManCmd) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/test/e2e/ansible/scorecard_test.go b/test/e2e/ansible/scorecard_test.go new file mode 100644 index 0000000..f39d551 --- /dev/null +++ b/test/e2e/ansible/scorecard_test.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_ansible_test + +import ( + . "github.com/onsi/ginkgo/v2" + + "github.com/operator-framework/ansible-operator-plugins/test/common" +) + +var _ = Describe("scorecard", common.ScorecardSpec(&tc, "ansible")) diff --git a/test/e2e/ansible/suite_test.go b/test/e2e/ansible/suite_test.go new file mode 100644 index 0000000..25c6a2c --- /dev/null +++ b/test/e2e/ansible/suite_test.go @@ -0,0 +1,263 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_ansible_test + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + kbutil "sigs.k8s.io/kubebuilder/v3/pkg/plugin/util" + + "github.com/operator-framework/ansible-operator-plugins/internal/testutils" +) + +// TestE2EAnsible ensures that the ansible projects built with the SDK tool work correctly by using its binary. 
+func TestE2EAnsible(t *testing.T) { + if testing.Short() { + t.Skip("skipping Operator SDK E2E Ansible Suite testing in short mode") + } + RegisterFailHandler(Fail) + RunSpecs(t, "E2EAnsible Suite") +} + +var ( + tc testutils.TestContext +) + +// BeforeSuite runs before any specs are run to perform the required actions for all e2e ansible tests. +var _ = BeforeSuite(func() { + var err error + + By("creating a new test context") + tc, err = testutils.NewTestContext(testutils.BinaryName, "GO111MODULE=on") + Expect(err).NotTo(HaveOccurred()) + + tc.Domain = "example.com" + tc.Version = "v1alpha1" + tc.Group = "cache" + tc.Kind = "Memcached" + tc.ProjectName = "memcached-operator" + tc.Kubectl.Namespace = fmt.Sprintf("%s-system", tc.ProjectName) + tc.Kubectl.ServiceAccount = fmt.Sprintf("%s-controller-manager", tc.ProjectName) + + By("copying sample to a temporary e2e directory") + Expect(exec.Command("cp", "-r", "../../../testdata/ansible/memcached-operator", tc.Dir).Run()).To(Succeed()) + + By("enabling debug logging in the manager") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "config", "default", "manager_auth_proxy_patch.yaml"), + "- \"--leader-elect\"", "- \"--zap-log-level=2\"\n - \"--leader-elect\"") + Expect(err).NotTo(HaveOccurred()) + + By("preparing the prerequisites on cluster") + tc.InstallPrerequisites() + + By("using dev image for scorecard-test") + err = tc.ReplaceScorecardImagesForDev() + Expect(err).NotTo(HaveOccurred()) + + By("replacing project Dockerfile to use ansible base image with the dev tag") + err = kbutil.ReplaceRegexInFile(filepath.Join(tc.Dir, "Dockerfile"), "quay.io/operator-framework/ansible-operator:.*", "quay.io/operator-framework/ansible-operator:dev") + Expect(err).Should(Succeed()) + + By("adding Memcached mock task to the role") + err = kbutil.InsertCode(filepath.Join(tc.Dir, "roles", strings.ToLower(tc.Kind), "tasks", "main.yml"), + "periodSeconds: 3", memcachedWithBlackListTask) + Expect(err).NotTo(HaveOccurred()) + + 
By("creating an API definition to add a task to delete the config map") + err = tc.CreateAPI( + "--group", tc.Group, + "--version", tc.Version, + "--kind", "Memfin", + "--generate-role") + Expect(err).NotTo(HaveOccurred()) + + By("updating spec of Memfin sample") + err = kbutil.ReplaceInFile( + filepath.Join(tc.Dir, "config", "samples", fmt.Sprintf("%s_%s_memfin.yaml", tc.Group, tc.Version)), + "# TODO(user): Add fields here", + "foo: bar") + Expect(err).NotTo(HaveOccurred()) + + By("adding task to delete config map") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "roles", "memfin", "tasks", "main.yml"), + "# tasks file for Memfin", taskToDeleteConfigMap) + Expect(err).NotTo(HaveOccurred()) + + By("adding to watches finalizer and blacklist") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "watches.yaml"), + "playbook: playbooks/memcached.yml", memcachedWatchCustomizations) + Expect(err).NotTo(HaveOccurred()) + + By("create API to test watching multiple GVKs") + err = tc.CreateAPI( + "--group", tc.Group, + "--version", tc.Version, + "--kind", "Foo", + "--generate-role") + Expect(err).NotTo(HaveOccurred()) + + By("updating spec of Foo sample") + err = kbutil.ReplaceInFile( + filepath.Join(tc.Dir, "config", "samples", fmt.Sprintf("%s_%s_foo.yaml", tc.Group, tc.Version)), + "# TODO(user): Add fields here", + "foo: bar") + Expect(err).NotTo(HaveOccurred()) + + By("adding task to display annotations of Foo") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "roles", "foo", "tasks", "main.yml"), + "# tasks file for Foo", fooDebugAnnotations) + Expect(err).NotTo(HaveOccurred()) + + By("adding to watches annotations changes") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "watches.yaml"), + "role: foo", fooWatchCustomizations) + Expect(err).NotTo(HaveOccurred()) + + By("adding RBAC permissions for the Memcached Kind") + err = kbutil.ReplaceInFile(filepath.Join(tc.Dir, "config", "rbac", "role.yaml"), + "#+kubebuilder:scaffold:rules", rolesForBaseOperator) + 
Expect(err).NotTo(HaveOccurred()) + + By("building the project image") + err = tc.Make("docker-build", "IMG="+tc.ImageName) + Expect(err).NotTo(HaveOccurred()) + + onKind, err := tc.IsRunningOnKind() + Expect(err).NotTo(HaveOccurred()) + if onKind { + By("loading the required images into Kind cluster") + Expect(tc.LoadImageToKindCluster()).To(Succeed()) + Expect(tc.LoadImageToKindClusterWithName("quay.io/operator-framework/scorecard-test:dev")).To(Succeed()) + } + + By("generating bundle") + Expect(tc.GenerateBundle()).To(Succeed()) +}) + +// AfterSuite runs after all the specs have run, regardless of whether any tests have failed, to ensure that +// everything is cleaned up +var _ = AfterSuite(func() { + By("uninstalling prerequisites") + tc.UninstallPrerequisites() + + By("destroying container image and work dir") + tc.Destroy() +}) + +const memcachedWithBlackListTask = ` + +- operator_sdk.util.k8s_status: + api_version: cache.example.com/v1alpha1 + kind: Memcached + name: "{{ ansible_operator_meta.name }}" + namespace: "{{ ansible_operator_meta.namespace }}" + status: + test: "hello world" + +- kubernetes.core.k8s: + definition: + kind: Secret + apiVersion: v1 + metadata: + name: test-secret + namespace: "{{ ansible_operator_meta.namespace }}" + data: + test: aGVsbG8K +- name: Get cluster api_groups + set_fact: + api_groups: "{{ lookup('kubernetes.core.k8s', cluster_info='api_groups', kubeconfig=lookup('env', 'K8S_AUTH_KUBECONFIG')) }}" + +- name: create project if projects are available + kubernetes.core.k8s: + definition: + apiVersion: project.openshift.io/v1 + kind: Project + metadata: + name: testing-foo + when: "'project.openshift.io' in api_groups" + +- name: Create ConfigMap to test blacklisted watches + kubernetes.core.k8s: + definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: test-blacklist-watches + namespace: "{{ ansible_operator_meta.namespace }}" + data: + arbitrary: afdasdfsajsafj + state: present` + +const taskToDeleteConfigMap = `- name: 
delete configmap for test + kubernetes.core.k8s: + kind: ConfigMap + api_version: v1 + name: deleteme + namespace: default + state: absent` + +const memcachedWatchCustomizations = `playbook: playbooks/memcached.yml + finalizer: + name: cache.example.com/finalizer + role: memfin + blacklist: + - group: "" + version: v1 + kind: ConfigMap` + +const rolesForBaseOperator = ` + ## + ## Apply customize roles for base operator + ## + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +#+kubebuilder:scaffold:rules +` +const fooDebugAnnotations = ` +- name: Fetch annotations + k8s_info: + kind: Foo + api_version: cache.example.com/v1alpha1 + name: "{{ ansible_operator_meta.name }}" + namespace: "{{ ansible_operator_meta.namespace }}" + register: foo_cr_info + +- name: Print annotations + debug: + msg: "test-annotation found : {{ foo_cr_info.resources[0].metadata.annotations['test-annotation'] }}" + when: + - foo_cr_info.resources | length > 0 + - "'test-annotation' in foo_cr_info.resources[0].metadata.annotations | default({})" +` + +const fooWatchCustomizations = `role: foo + watchAnnotationsChanges: true +` diff --git a/testdata/ansible/memcached-operator/.gitignore b/testdata/ansible/memcached-operator/.gitignore new file mode 100644 index 0000000..62fd3e3 --- /dev/null +++ b/testdata/ansible/memcached-operator/.gitignore @@ -0,0 +1,14 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/testdata/ansible/memcached-operator/Dockerfile b/testdata/ansible/memcached-operator/Dockerfile new file mode 100644 index 0000000..4fb6c38 --- /dev/null +++ b/testdata/ansible/memcached-operator/Dockerfile @@ -0,0 +1,9 @@ +FROM quay.io/operator-framework/ansible-operator:v1.30.0 + +COPY requirements.yml ${HOME}/requirements.yml +RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \ + && chmod -R 
ug+rwx ${HOME}/.ansible + +COPY watches.yaml ${HOME}/watches.yaml +COPY roles/ ${HOME}/roles/ +COPY playbooks/ ${HOME}/playbooks/ diff --git a/testdata/ansible/memcached-operator/Makefile b/testdata/ansible/memcached-operator/Makefile new file mode 100644 index 0000000..e7aee60 --- /dev/null +++ b/testdata/ansible/memcached-operator/Makefile @@ -0,0 +1,231 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.0.1 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# example.com/memcached-operator-bundle:$VERSION and example.com/memcached-operator-catalog:$VERSION. +IMAGE_TAG_BASE ?= example.com/memcached-operator + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 
+OPERATOR_SDK_VERSION ?= v1.30.0 + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest + +.PHONY: all +all: docker-build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +.PHONY: run +ANSIBLE_ROLES_PATH?="$(shell pwd)/roles" +run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config + $(ANSIBLE_OPERATOR) run + +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + docker build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + docker push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. 
if you do not inform a valid value via IMG=> than the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - docker buildx rm project-v3-builder + rm Dockerfile.cross + +##@ Deployment + +.PHONY: install +install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +.PHONY: uninstall +uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +.PHONY: deploy +deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/default | kubectl delete -f - + +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') + +.PHONY: kustomize +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: ## Download kustomize locally if necessary. 
+ifeq (,$(wildcard $(KUSTOMIZE))) +ifeq (,$(shell which kustomize 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(KUSTOMIZE)) ;\ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v4.5.7/kustomize_v4.5.7_$(OS)_$(ARCH).tar.gz | \ + tar xzf - -C bin/ ;\ + } +else +KUSTOMIZE = $(shell which kustomize) +endif +endif + +.PHONY: ansible-operator +ANSIBLE_OPERATOR = $(shell pwd)/bin/ansible-operator +ansible-operator: ## Download ansible-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist. +ifeq (,$(wildcard $(ANSIBLE_OPERATOR))) +ifeq (,$(shell which ansible-operator 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\ + curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.30.0/ansible-operator_$(OS)_$(ARCH) ;\ + chmod +x $(ANSIBLE_OPERATOR) ;\ + } +else +ANSIBLE_OPERATOR = $(shell which ansible-operator) +endif +endif + +.PHONY: operator-sdk +OPERATOR_SDK ?= ./bin/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. +ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests --interactive=false -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. 
+ docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = ./bin/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$(OS)-$(ARCH)-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. 
+ $(MAKE) docker-push IMG=$(CATALOG_IMG) diff --git a/testdata/ansible/memcached-operator/PROJECT b/testdata/ansible/memcached-operator/PROJECT new file mode 100644 index 0000000..b24bbff --- /dev/null +++ b/testdata/ansible/memcached-operator/PROJECT @@ -0,0 +1,20 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: example.com +layout: +- ansible.sdk.operatorframework.io/v1 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: memcached-operator +resources: +- api: + crdVersion: v1 + namespaced: true + domain: example.com + group: cache + kind: Memcached + version: v1alpha1 +version: "3" diff --git a/testdata/ansible/memcached-operator/bundle.Dockerfile b/testdata/ansible/memcached-operator/bundle.Dockerfile new file mode 100644 index 0000000..432577b --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle.Dockerfile @@ -0,0 +1,17 @@ +FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=memcached-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. 
+COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/testdata/ansible/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml b/testdata/ansible/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml new file mode 100644 index 0000000..9a95005 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: memcacheds.cache.example.com +spec: + group: cache.example.com + names: + kind: Memcached + listKind: MemcachedList + plural: memcacheds + singular: memcached + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Memcached is the Schema for the memcacheds API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of Memcached + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of Memcached + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml new file mode 100644 index 0000000..e7d0a16 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml @@ -0,0 +1,23 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/part-of: memcached-operator + control-plane: controller-manager + name: memcached-operator-controller-manager-metrics-monitor +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + path: /metrics + port: https + scheme: https + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-service_v1_service.yaml 
b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 0000000..d172662 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: service + app.kubernetes.io/part-of: memcached-operator + control-plane: controller-manager + name: memcached-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager +status: + loadBalancer: {} diff --git a/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 0000000..6f58e96 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: memcached-operator + name: memcached-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git 
a/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator.clusterserviceversion.yaml b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator.clusterserviceversion.yaml new file mode 100644 index 0000000..e1bf0b5 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/manifests/memcached-operator.clusterserviceversion.yaml @@ -0,0 +1,261 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "cache.example.com/v1alpha1", + "kind": "Memcached", + "metadata": { + "labels": { + "app.kubernetes.io/created-by": "memcached-operator", + "app.kubernetes.io/instance": "memcached-sample", + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "memcached", + "app.kubernetes.io/part-of": "memcached-operator" + }, + "name": "memcached-sample" + }, + "spec": { + "size": 1 + } + } + ] + capabilities: Basic Install + createdAt: "2022-11-08T17:26:37Z" + name: memcached-operator.v0.0.1 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - kind: Memcached + name: memcacheds.cache.example.com + version: v1alpha1 + description: Memcached Operator description. TODO. 
+ displayName: Memcached Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - secrets + - pods + - pods/exec + - pods/log + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - cache.example.com + resources: + - memcacheds + - memcacheds/status + - memcacheds/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: memcached-operator-controller-manager + deployments: + - label: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: memcached-operator + control-plane: controller-manager + name: memcached-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + 
name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + - args: + - --health-probe-bind-address=:6789 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + - --leader-election-id=memcached-operator + env: + - name: ANSIBLE_GATHERING + value: explicit + image: quay.io/example/memcached-operator:v0.0.1 + livenessProbe: + httpGet: + path: /healthz + port: 6789 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 6789 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 768Mi + requests: + cpu: 10m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: memcached-operator-controller-manager + terminationGracePeriodSeconds: 10 + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: memcached-operator-controller-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - memcached-operator + links: + - name: Memcached Operator + url: https://memcached-operator.domain + maintainers: + - email: your@email.com + name: Maintainer Name + maturity: alpha + provider: + name: Provider Name + url: https://your.domain + 
version: 0.0.1 diff --git a/testdata/ansible/memcached-operator/bundle/metadata/annotations.yaml b/testdata/ansible/memcached-operator/bundle/metadata/annotations.yaml new file mode 100644 index 0000000..2a94286 --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/metadata/annotations.yaml @@ -0,0 +1,11 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: memcached-operator + operators.operatorframework.io.bundle.channels.v1: alpha + + # Annotations for testing. + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml new file mode 100644 index 0000000..104883e --- /dev/null +++ b/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + 
image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/testdata/ansible/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml b/testdata/ansible/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml new file mode 100644 index 0000000..b019a46 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: memcacheds.cache.example.com +spec: + group: cache.example.com + names: + kind: Memcached + listKind: MemcachedList + plural: memcacheds + singular: memcached + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Memcached is the Schema for the memcacheds API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of Memcached + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of Memcached + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/testdata/ansible/memcached-operator/config/crd/kustomization.yaml b/testdata/ansible/memcached-operator/config/crd/kustomization.yaml new file mode 100644 index 0000000..c8033c3 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/crd/kustomization.yaml @@ -0,0 +1,6 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/cache.example.com_memcacheds.yaml +#+kubebuilder:scaffold:crdkustomizeresource diff --git a/testdata/ansible/memcached-operator/config/default/kustomization.yaml b/testdata/ansible/memcached-operator/config/default/kustomization.yaml new file mode 100644 index 0000000..5850b40 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/default/kustomization.yaml @@ -0,0 +1,30 @@ +# Adds namespace to all resources. +namespace: memcached-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: memcached-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 
+- ../prometheus + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +- manager_auth_proxy_patch.yaml + + diff --git a/testdata/ansible/memcached-operator/config/default/manager_auth_proxy_patch.yaml b/testdata/ansible/memcached-operator/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000..1f25756 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,56 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - name: kube-rbac-proxy + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:6789" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" + - "--leader-election-id=memcached-operator" diff --git a/testdata/ansible/memcached-operator/config/default/manager_config_patch.yaml b/testdata/ansible/memcached-operator/config/default/manager_config_patch.yaml new file 
mode 100644 index 0000000..f6f5891 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/testdata/ansible/memcached-operator/config/manager/kustomization.yaml b/testdata/ansible/memcached-operator/config/manager/kustomization.yaml new file mode 100644 index 0000000..1a4048d --- /dev/null +++ b/testdata/ansible/memcached-operator/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: quay.io/example/memcached-operator + newTag: v0.0.1 diff --git a/testdata/ansible/memcached-operator/config/manager/manager.yaml b/testdata/ansible/memcached-operator/config/manager/manager.yaml new file mode 100644 index 0000000..eec8fc3 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/manager/manager.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + 
kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - args: + - --leader-elect + - --leader-election-id=memcached-operator + image: controller:latest + name: manager + env: + - name: ANSIBLE_GATHERING + value: explicit + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 6789 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 6789 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 768Mi + requests: + cpu: 10m + memory: 256Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/testdata/ansible/memcached-operator/config/manifests/bases/memcached-operator.clusterserviceversion.yaml b/testdata/ansible/memcached-operator/config/manifests/bases/memcached-operator.clusterserviceversion.yaml new file mode 100644 index 0000000..1f1eb42 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/manifests/bases/memcached-operator.clusterserviceversion.yaml @@ -0,0 +1,42 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Basic Install + name: memcached-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: {} + description: Memcached Operator description. TODO. + displayName: Memcached Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - memcached-operator + links: + - name: Memcached Operator + url: https://memcached-operator.domain + maintainers: + - email: your@email.com + name: Maintainer Name + maturity: alpha + provider: + name: Provider Name + url: https://your.domain + version: 0.0.0 diff --git a/testdata/ansible/memcached-operator/config/manifests/kustomization.yaml b/testdata/ansible/memcached-operator/config/manifests/kustomization.yaml new file mode 100644 index 0000000..705a7b9 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/manifests/kustomization.yaml @@ -0,0 +1,7 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 
'manifests/' directory in a bundle. +resources: +- bases/memcached-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard diff --git a/testdata/ansible/memcached-operator/config/prometheus/kustomization.yaml b/testdata/ansible/memcached-operator/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/testdata/ansible/memcached-operator/config/prometheus/monitor.yaml b/testdata/ansible/memcached-operator/config/prometheus/monitor.yaml new file mode 100644 index 0000000..4dfab6b --- /dev/null +++ b/testdata/ansible/memcached-operator/config/prometheus/monitor.yaml @@ -0,0 +1,26 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/testdata/ansible/memcached-operator/config/rbac/auth_proxy_client_clusterrole.yaml b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000..a2a3b44 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: 
clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role.yaml b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..b7c9c6f --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role_binding.yaml b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..a4a1a85 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/testdata/ansible/memcached-operator/config/rbac/auth_proxy_service.yaml b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..ecaef24 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager diff --git a/testdata/ansible/memcached-operator/config/rbac/kustomization.yaml b/testdata/ansible/memcached-operator/config/rbac/kustomization.yaml new file mode 100644 index 0000000..731832a --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. 
+- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/testdata/ansible/memcached-operator/config/rbac/leader_election_role.yaml b/testdata/ansible/memcached-operator/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..ce7088e --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,44 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/testdata/ansible/memcached-operator/config/rbac/leader_election_role_binding.yaml b/testdata/ansible/memcached-operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..4ee27a3 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 
leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/testdata/ansible/memcached-operator/config/rbac/memcached_editor_role.yaml b/testdata/ansible/memcached-operator/config/rbac/memcached_editor_role.yaml new file mode 100644 index 0000000..cbc4577 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/memcached_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit memcacheds. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: memcached-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: memcached-editor-role +rules: +- apiGroups: + - cache.example.com + resources: + - memcacheds + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cache.example.com + resources: + - memcacheds/status + verbs: + - get diff --git a/testdata/ansible/memcached-operator/config/rbac/memcached_viewer_role.yaml b/testdata/ansible/memcached-operator/config/rbac/memcached_viewer_role.yaml new file mode 100644 index 0000000..b8977ec --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/memcached_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view memcacheds. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: memcached-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: memcached-viewer-role +rules: +- apiGroups: + - cache.example.com + resources: + - memcacheds + verbs: + - get + - list + - watch +- apiGroups: + - cache.example.com + resources: + - memcacheds/status + verbs: + - get diff --git a/testdata/ansible/memcached-operator/config/rbac/role.yaml b/testdata/ansible/memcached-operator/config/rbac/role.yaml new file mode 100644 index 0000000..27958f8 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/role.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: + ## + ## Base operator rules + ## + - apiGroups: + - "" + resources: + - secrets + - pods + - pods/exec + - pods/log + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + ## + ## Rules for cache.example.com/v1alpha1, Kind: Memcached + ## + - apiGroups: + - cache.example.com + resources: + - memcacheds + - memcacheds/status + - memcacheds/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +#+kubebuilder:scaffold:rules diff --git a/testdata/ansible/memcached-operator/config/rbac/role_binding.yaml b/testdata/ansible/memcached-operator/config/rbac/role_binding.yaml new file mode 100644 index 0000000..cc2b714 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + 
labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/testdata/ansible/memcached-operator/config/rbac/service_account.yaml b/testdata/ansible/memcached-operator/config/rbac/service_account.yaml new file mode 100644 index 0000000..9712cf0 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: memcached-operator + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/testdata/ansible/memcached-operator/config/samples/cache_v1alpha1_memcached.yaml b/testdata/ansible/memcached-operator/config/samples/cache_v1alpha1_memcached.yaml new file mode 100644 index 0000000..1a37784 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/samples/cache_v1alpha1_memcached.yaml @@ -0,0 +1,12 @@ +apiVersion: cache.example.com/v1alpha1 +kind: Memcached +metadata: + labels: + app.kubernetes.io/name: memcached + app.kubernetes.io/instance: memcached-sample + app.kubernetes.io/part-of: memcached-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: memcached-operator + name: memcached-sample +spec: + size: 1 diff --git a/testdata/ansible/memcached-operator/config/samples/kustomization.yaml b/testdata/ansible/memcached-operator/config/samples/kustomization.yaml new 
file mode 100644 index 0000000..89d9111 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- cache_v1alpha1_memcached.yaml +#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/testdata/ansible/memcached-operator/config/scorecard/bases/config.yaml b/testdata/ansible/memcached-operator/config/scorecard/bases/config.yaml new file mode 100644 index 0000000..c770478 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/testdata/ansible/memcached-operator/config/scorecard/kustomization.yaml b/testdata/ansible/memcached-operator/config/scorecard/kustomization.yaml new file mode 100644 index 0000000..50cd2d0 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000..c95b4ec --- /dev/null +++ b/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: basic + test: basic-check-spec-test diff --git 
a/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000..f6607c2 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.30.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/testdata/ansible/memcached-operator/config/testing/debug_logs_patch.yaml b/testdata/ansible/memcached-operator/config/testing/debug_logs_patch.yaml new file mode 100644 index 0000000..3fb3d55 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/testing/debug_logs_patch.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + env: + - name: ANSIBLE_DEBUG_LOGS + value: "TRUE" diff --git 
a/testdata/ansible/memcached-operator/config/testing/kustomization.yaml b/testdata/ansible/memcached-operator/config/testing/kustomization.yaml new file mode 100644 index 0000000..4109162 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/testing/kustomization.yaml @@ -0,0 +1,23 @@ +# Adds namespace to all resources. +namespace: osdk-test + +namePrefix: osdk- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +patchesStrategicMerge: +- manager_image.yaml +- debug_logs_patch.yaml +- ../default/manager_auth_proxy_patch.yaml + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../crd +- ../rbac +- ../manager +images: +- name: testing + newName: testing-operator diff --git a/testdata/ansible/memcached-operator/config/testing/manager_image.yaml b/testdata/ansible/memcached-operator/config/testing/manager_image.yaml new file mode 100644 index 0000000..e44f542 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/testing/manager_image.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + image: testing diff --git a/testdata/ansible/memcached-operator/config/testing/pull_policy/Always.yaml b/testdata/ansible/memcached-operator/config/testing/pull_policy/Always.yaml new file mode 100644 index 0000000..6b0a8e2 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/testing/pull_policy/Always.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/testdata/ansible/memcached-operator/config/testing/pull_policy/IfNotPresent.yaml b/testdata/ansible/memcached-operator/config/testing/pull_policy/IfNotPresent.yaml new file mode 100644 index 0000000..2f52f49 --- /dev/null +++ 
b/testdata/ansible/memcached-operator/config/testing/pull_policy/IfNotPresent.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: IfNotPresent diff --git a/testdata/ansible/memcached-operator/config/testing/pull_policy/Never.yaml b/testdata/ansible/memcached-operator/config/testing/pull_policy/Never.yaml new file mode 100644 index 0000000..86f13d8 --- /dev/null +++ b/testdata/ansible/memcached-operator/config/testing/pull_policy/Never.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Never diff --git a/testdata/ansible/memcached-operator/molecule/default/converge.yml b/testdata/ansible/memcached-operator/molecule/default/converge.yml new file mode 100644 index 0000000..9e65e37 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/converge.yml @@ -0,0 +1,18 @@ +--- +- name: Converge + hosts: localhost + connection: local + gather_facts: no + collections: + - kubernetes.core + + tasks: + - name: Create Namespace + k8s: + api_version: v1 + kind: Namespace + name: '{{ namespace }}' + + - import_tasks: kustomize.yml + vars: + state: present diff --git a/testdata/ansible/memcached-operator/molecule/default/create.yml b/testdata/ansible/memcached-operator/molecule/default/create.yml new file mode 100644 index 0000000..1eeaf92 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/create.yml @@ -0,0 +1,6 @@ +--- +- name: Create + hosts: localhost + connection: local + gather_facts: false + tasks: [] diff --git a/testdata/ansible/memcached-operator/molecule/default/destroy.yml b/testdata/ansible/memcached-operator/molecule/default/destroy.yml new file mode 100644 index 0000000..9a41e7d --- /dev/null +++ 
b/testdata/ansible/memcached-operator/molecule/default/destroy.yml @@ -0,0 +1,24 @@ +--- +- name: Destroy + hosts: localhost + connection: local + gather_facts: false + collections: + - kubernetes.core + + tasks: + - import_tasks: kustomize.yml + vars: + state: absent + + - name: Destroy Namespace + k8s: + api_version: v1 + kind: Namespace + name: '{{ namespace }}' + state: absent + + - name: Unset pull policy + command: '{{ kustomize }} edit remove patch pull_policy/{{ operator_pull_policy }}.yaml' + args: + chdir: '{{ config_dir }}/testing' diff --git a/testdata/ansible/memcached-operator/molecule/default/kustomize.yml b/testdata/ansible/memcached-operator/molecule/default/kustomize.yml new file mode 100644 index 0000000..5c801d6 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/kustomize.yml @@ -0,0 +1,22 @@ +--- +- name: Build kustomize testing overlay + # load_restrictor must be set to none so we can load patch files from the default overlay + command: '{{ kustomize }} build --load-restrictor LoadRestrictionsNone' + args: + chdir: '{{ config_dir }}/testing' + register: resources + changed_when: false + +- name: Set resources to {{ state }} + k8s: + definition: '{{ item }}' + state: '{{ state }}' + wait: no + loop: '{{ resources.stdout | from_yaml_all | list }}' + +- name: Wait for resources to get to {{ state }} + k8s: + definition: '{{ item }}' + state: '{{ state }}' + wait: yes + loop: '{{ resources.stdout | from_yaml_all | list }}' diff --git a/testdata/ansible/memcached-operator/molecule/default/molecule.yml b/testdata/ansible/memcached-operator/molecule/default/molecule.yml new file mode 100644 index 0000000..ea58004 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/molecule.yml @@ -0,0 +1,36 @@ +--- +dependency: + name: galaxy +driver: + name: delegated +lint: | + set -e + yamllint -d "{extends: relaxed, rules: {line-length: {max: 120}}}" . 
+platforms: + - name: cluster + groups: + - k8s +provisioner: + name: ansible + lint: | + set -e + ansible-lint + inventory: + group_vars: + all: + namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test} + host_vars: + localhost: + ansible_python_interpreter: '{{ ansible_playbook_python }}' + config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config + samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples + operator_image: ${OPERATOR_IMAGE:-""} + operator_pull_policy: ${OPERATOR_PULL_POLICY:-"Always"} + kustomize: ${KUSTOMIZE_PATH:-kustomize} + env: + K8S_AUTH_KUBECONFIG: ${KUBECONFIG:-"~/.kube/config"} +verifier: + name: ansible + lint: | + set -e + ansible-lint diff --git a/testdata/ansible/memcached-operator/molecule/default/prepare.yml b/testdata/ansible/memcached-operator/molecule/default/prepare.yml new file mode 100644 index 0000000..ed40370 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/prepare.yml @@ -0,0 +1,28 @@ +--- +- name: Prepare + hosts: localhost + connection: local + gather_facts: false + + tasks: + - name: Ensure operator image is set + fail: + msg: | + You must specify the OPERATOR_IMAGE environment variable in order to run the + 'default' scenario + when: not operator_image + + - name: Set testing image + command: '{{ kustomize }} edit set image testing={{ operator_image }}' + args: + chdir: '{{ config_dir }}/testing' + + - name: Set pull policy + command: '{{ kustomize }} edit add patch --path pull_policy/{{ operator_pull_policy }}.yaml' + args: + chdir: '{{ config_dir }}/testing' + + - name: Set testing namespace + command: '{{ kustomize }} edit set namespace {{ namespace }}' + args: + chdir: '{{ config_dir }}/testing' diff --git a/testdata/ansible/memcached-operator/molecule/default/tasks/memcached_test.yml b/testdata/ansible/memcached-operator/molecule/default/tasks/memcached_test.yml new file mode 100644 index 0000000..b13cac0 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/tasks/memcached_test.yml @@ 
-0,0 +1,129 @@ +--- +- name: Load CR + set_fact: + custom_resource: "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}" + vars: + cr_file: 'cache_v1alpha1_memcached.yaml' + +- name: Create the cache.example.com/v1alpha1.Memcached + k8s: + state: present + namespace: '{{ namespace }}' + definition: '{{ custom_resource }}' + wait: yes + wait_timeout: 300 + wait_condition: + type: Successful + status: "True" + +- name: Wait 2 minutes for memcached deployment + debug: + var: deploy + until: + - deploy is defined + - deploy.status is defined + - deploy.status.replicas is defined + - deploy.status.replicas == deploy.status.get("availableReplicas", 0) + retries: 12 + delay: 10 + vars: + deploy: '{{ lookup("k8s", + kind="Deployment", + api_version="apps/v1", + namespace=namespace, + label_selector="app=memcached" + )}}' + +- name: Verify custom status exists + assert: + that: debug_cr.status.get("test") == "hello world" + vars: + debug_cr: '{{ lookup("k8s", + kind=custom_resource.kind, + api_version=custom_resource.apiVersion, + namespace=namespace, + resource_name=custom_resource.metadata.name + )}}' + +- when: molecule_yml.scenario.name == "test-local" + block: + - name: Restart the operator by killing the pod + k8s: + state: absent + definition: + api_version: v1 + kind: Pod + metadata: + namespace: '{{ namespace }}' + name: '{{ pod.metadata.name }}' + vars: + pod: '{{ q("k8s", api_version="v1", kind="Pod", namespace=namespace, label_selector="name=memcached-operator").0 }}' + + - name: Wait 2 minutes for operator deployment + debug: + var: deploy + until: + - deploy is defined + - deploy.status is defined + - deploy.status.replicas is defined + - deploy.status.replicas == deploy.status.get("availableReplicas", 0) + retries: 12 + delay: 10 + vars: + deploy: '{{ lookup("k8s", + kind="Deployment", + api_version="apps/v1", + namespace=namespace, + resource_name="memcached-operator" + )}}' + + - name: Wait for reconciliation to have a chance at 
finishing + pause: + seconds: 15 + + - name: Delete the service that is created. + k8s: + kind: Service + api_version: v1 + namespace: '{{ namespace }}' + name: test-service + state: absent + + - name: Verify that test-service was re-created + debug: + var: service + until: service + retries: 12 + delay: 10 + vars: + service: '{{ lookup("k8s", + kind="Service", + api_version="v1", + namespace=namespace, + resource_name="test-service", + )}}' + +- name: Delete the custom resource + k8s: + state: absent + namespace: '{{ namespace }}' + definition: '{{ custom_resource }}' + +- name: Wait for the custom resource to be deleted + k8s_info: + api_version: '{{ custom_resource.apiVersion }}' + kind: '{{ custom_resource.kind }}' + namespace: '{{ namespace }}' + name: '{{ custom_resource.metadata.name }}' + register: cr + retries: 10 + delay: 6 + until: not cr.resources + failed_when: cr.resources + +- name: Verify the Deployment was deleted (wait 30s) + assert: + that: not lookup('k8s', kind='Deployment', api_version='apps/v1', namespace=namespace, label_selector='app=memcached') + retries: 10 + delay: 3 + diff --git a/testdata/ansible/memcached-operator/molecule/default/verify.yml b/testdata/ansible/memcached-operator/molecule/default/verify.yml new file mode 100644 index 0000000..15f3674 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/default/verify.yml @@ -0,0 +1,57 @@ +--- +- name: Verify + hosts: localhost + connection: local + gather_facts: no + collections: + - kubernetes.core + + vars: + ctrl_label: control-plane=controller-manager + + tasks: + - block: + - name: Import all test files from tasks/ + include_tasks: '{{ item }}' + with_fileglob: + - tasks/*_test.yml + rescue: + - name: Retrieve relevant resources + k8s_info: + api_version: '{{ item.api_version }}' + kind: '{{ item.kind }}' + namespace: '{{ namespace }}' + loop: + - api_version: v1 + kind: Pod + - api_version: apps/v1 + kind: Deployment + - api_version: v1 + kind: Secret + - api_version: 
v1 + kind: ConfigMap + register: debug_resources + + - name: Retrieve Pod logs + k8s_log: + name: '{{ item.metadata.name }}' + namespace: '{{ namespace }}' + container: manager + loop: "{{ q('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=ctrl_label) }}" + register: debug_logs + + - name: Output gathered resources + debug: + var: debug_resources + + - name: Output gathered logs + debug: + var: item.log_lines + loop: '{{ debug_logs.results }}' + + - name: Re-emit failure + vars: + failed_task: + result: '{{ ansible_failed_result }}' + fail: + msg: '{{ failed_task }}' diff --git a/testdata/ansible/memcached-operator/molecule/kind/converge.yml b/testdata/ansible/memcached-operator/molecule/kind/converge.yml new file mode 100644 index 0000000..8bd5700 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/kind/converge.yml @@ -0,0 +1,24 @@ +--- +- name: Converge + hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: Build operator image + docker_image: + build: + path: '{{ project_dir }}' + pull: no + name: '{{ operator_image }}' + tag: latest + push: no + source: build + force_source: yes + + - name: Load image into kind cluster + command: kind load docker-image --name osdk-test '{{ operator_image }}' + register: result + changed_when: '"not yet present" in result.stdout' + +- import_playbook: ../default/converge.yml diff --git a/testdata/ansible/memcached-operator/molecule/kind/create.yml b/testdata/ansible/memcached-operator/molecule/kind/create.yml new file mode 100644 index 0000000..66a84a1 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/kind/create.yml @@ -0,0 +1,8 @@ +--- +- name: Create + hosts: localhost + connection: local + gather_facts: false + tasks: + - name: Create test kind cluster + command: kind create cluster --name osdk-test --kubeconfig {{ kubeconfig }} diff --git a/testdata/ansible/memcached-operator/molecule/kind/destroy.yml 
b/testdata/ansible/memcached-operator/molecule/kind/destroy.yml new file mode 100644 index 0000000..304cca2 --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/kind/destroy.yml @@ -0,0 +1,16 @@ +--- +- name: Destroy + hosts: localhost + connection: local + gather_facts: false + collections: + - kubernetes.core + + tasks: + - name: Destroy test kind cluster + command: kind delete cluster --name osdk-test --kubeconfig {{ kubeconfig }} + + - name: Unset pull policy + command: '{{ kustomize }} edit remove patch pull_policy/{{ operator_pull_policy }}.yaml' + args: + chdir: '{{ config_dir }}/testing' diff --git a/testdata/ansible/memcached-operator/molecule/kind/molecule.yml b/testdata/ansible/memcached-operator/molecule/kind/molecule.yml new file mode 100644 index 0000000..534c8ce --- /dev/null +++ b/testdata/ansible/memcached-operator/molecule/kind/molecule.yml @@ -0,0 +1,33 @@ +--- +dependency: + name: galaxy +driver: + name: delegated +platforms: + - name: cluster + groups: + - k8s +provisioner: + name: ansible + playbooks: + prepare: ../default/prepare.yml + verify: ../default/verify.yml + inventory: + group_vars: + all: + namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test} + host_vars: + localhost: + ansible_python_interpreter: '{{ ansible_playbook_python }}' + config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config + samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples + project_dir: ${MOLECULE_PROJECT_DIRECTORY} + operator_image: testing-operator + operator_pull_policy: "Never" + kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" + kustomize: ${KUSTOMIZE_PATH:-kustomize} + env: + K8S_AUTH_KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig + KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig +verifier: + name: ansible diff --git a/testdata/ansible/memcached-operator/playbooks/.placeholder b/testdata/ansible/memcached-operator/playbooks/.placeholder new file mode 100644 index 0000000..e69de29 diff --git 
a/testdata/ansible/memcached-operator/playbooks/memcached.yml b/testdata/ansible/memcached-operator/playbooks/memcached.yml new file mode 100644 index 0000000..b3ce2c0 --- /dev/null +++ b/testdata/ansible/memcached-operator/playbooks/memcached.yml @@ -0,0 +1,9 @@ +--- +- hosts: localhost + gather_facts: no + collections: + - kubernetes.core + - operator_sdk.util + tasks: + - import_role: + name: "memcached" diff --git a/testdata/ansible/memcached-operator/requirements.yml b/testdata/ansible/memcached-operator/requirements.yml new file mode 100644 index 0000000..6fcdac3 --- /dev/null +++ b/testdata/ansible/memcached-operator/requirements.yml @@ -0,0 +1,10 @@ +--- +collections: + - name: operator_sdk.util + version: "0.4.0" + - name: kubernetes.core + version: "2.4.0" + - name: cloud.common + version: "2.1.1" + - name: community.docker + version: "3.4.0" diff --git a/testdata/ansible/memcached-operator/roles/.placeholder b/testdata/ansible/memcached-operator/roles/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/testdata/ansible/memcached-operator/roles/memcached/README.md b/testdata/ansible/memcached-operator/roles/memcached/README.md new file mode 100644 index 0000000..c37ca91 --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/README.md @@ -0,0 +1,43 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, +if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in +defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables +that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) 
should be mentioned here as well + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set +for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for +users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/testdata/ansible/memcached-operator/roles/memcached/defaults/main.yml b/testdata/ansible/memcached-operator/roles/memcached/defaults/main.yml new file mode 100644 index 0000000..073d527 --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/defaults/main.yml @@ -0,0 +1,2 @@ +--- +size: 1 diff --git a/testdata/ansible/memcached-operator/roles/memcached/files/.placeholder b/testdata/ansible/memcached-operator/roles/memcached/files/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/testdata/ansible/memcached-operator/roles/memcached/handlers/main.yml b/testdata/ansible/memcached-operator/roles/memcached/handlers/main.yml new file mode 100644 index 0000000..87ec314 --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for Memcached diff --git a/testdata/ansible/memcached-operator/roles/memcached/meta/main.yml b/testdata/ansible/memcached-operator/roles/memcached/meta/main.yml new file mode 100644 index 0000000..dfab20d --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/meta/main.yml @@ -0,0 +1,64 @@ +--- +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, 
uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
+collections: +- operator_sdk.util +- kubernetes.core diff --git a/testdata/ansible/memcached-operator/roles/memcached/tasks/main.yml b/testdata/ansible/memcached-operator/roles/memcached/tasks/main.yml new file mode 100644 index 0000000..3fad70b --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/tasks/main.yml @@ -0,0 +1,180 @@ +--- +# tasks file for Memcached +- name: start memcached + kubernetes.core.k8s: + definition: + kind: Deployment + apiVersion: apps/v1 + metadata: + name: '{{ ansible_operator_meta.name }}-memcached' + namespace: '{{ ansible_operator_meta.namespace }}' + labels: + app: memcached + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + replicas: "{{size}}" + selector: + matchLabels: + app: memcached + template: + metadata: + labels: + app: memcached + spec: + containers: + - name: memcached + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + command: + - memcached + - -m=64 + - -o + - modern + - -v + image: "docker.io/memcached:1.4.36-alpine" + ports: + - containerPort: 11211 + readinessProbe: + tcpSocket: + port: 11211 + initialDelaySeconds: 3 + periodSeconds: 3 + +- name: Check if config exists + ansible.builtin.stat: + path: /tmp/metricsbumped + register: metricsbumped + +# Only run once +- block: + - ansible.builtin.file: + path: /tmp/metricsbumped + state: touch + # Sanity + - name: create sanity_counter + operator_sdk.util.osdk_metric: + name: sanity_counter + description: ensure counter can be created + counter: {} + + - name: create sanity_gauge + operator_sdk.util.osdk_metric: + name: sanity_gauge + description: ensure gauge can be created + gauge: {} + + - name: create sanity_histogram + operator_sdk.util.osdk_metric: + name: sanity_histogram + description: ensure histogram can be created + histogram: {} + + - name: create sanity_summary + operator_sdk.util.osdk_metric: + name: sanity_summary + description: ensure summary can be created + 
summary: {} + + # Counter + - name: Counter increment test setup + operator_sdk.util.osdk_metric: + name: counter_inc_test + description: create counter to be incremented + counter: {} + + - name: Execute Counter increment test + operator_sdk.util.osdk_metric: + name: counter_inc_test + description: increment counter + counter: + increment: yes + + - name: Counter add test setup + operator_sdk.util.osdk_metric: + name: counter_add_test + description: create counter to be added to + counter: {} + + - name: Counter add test exe + operator_sdk.util.osdk_metric: + name: counter_add_test + description: create counter to be incremented + counter: + add: 2 + + # Gauge + - name: Gauge set test + operator_sdk.util.osdk_metric: + name: gauge_set_test + description: create and set a gauge t0 5 + gauge: + set: 5 + + - name: Gauge add test setup + operator_sdk.util.osdk_metric: + name: gauge_add_test + description: create a gauge + gauge: {} + + - name: Gauge add test + operator_sdk.util.osdk_metric: + name: gauge_add_test + description: Add 7 to the gauge + gauge: + add: 7 + + - name: Gauge subtract test setup + operator_sdk.util.osdk_metric: + name: gauge_sub_test + description: create a gauge + gauge: {} + + - name: Gauge sub test + operator_sdk.util.osdk_metric: + name: gauge_sub_test + description: Add 7 to the gauge + gauge: + subtract: 7 + + - name: Gauge time test + operator_sdk.util.osdk_metric: + name: gauge_time_test + description: set the gauge to current time + gauge: + set_to_current_time: yes + + # Summary + - name: Summary test setup + operator_sdk.util.osdk_metric: + name: summary_test + description: create a summary + summary: {} + + - name: Summary test + operator_sdk.util.osdk_metric: + name: summary_test + description: observe a summary + summary: + observe: 2 + + # Histogram + - name: Histogram test setup + operator_sdk.util.osdk_metric: + name: histogram_test + description: create a histogram + histogram: {} + + - name: Histogram test + 
operator_sdk.util.osdk_metric: + name: histogram_test + description: observe a histogram + histogram: + observe: 2 + when: not metricsbumped.stat.exists + diff --git a/testdata/ansible/memcached-operator/roles/memcached/templates/.placeholder b/testdata/ansible/memcached-operator/roles/memcached/templates/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/testdata/ansible/memcached-operator/roles/memcached/vars/main.yml b/testdata/ansible/memcached-operator/roles/memcached/vars/main.yml new file mode 100644 index 0000000..1cd58e1 --- /dev/null +++ b/testdata/ansible/memcached-operator/roles/memcached/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for Memcached diff --git a/testdata/ansible/memcached-operator/watches.yaml b/testdata/ansible/memcached-operator/watches.yaml new file mode 100644 index 0000000..ec0cb28 --- /dev/null +++ b/testdata/ansible/memcached-operator/watches.yaml @@ -0,0 +1,7 @@ +--- +# Use the 'create api' subcommand to add watches to this file. +- version: v1alpha1 + group: cache.example.com + kind: Memcached + playbook: playbooks/memcached.yml +#+kubebuilder:scaffold:watch diff --git a/tools/scripts/fetch b/tools/scripts/fetch new file mode 100755 index 0000000..1a4ab3a --- /dev/null +++ b/tools/scripts/fetch @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +ROOT="$( git rev-parse --show-toplevel )" +DEST="${ROOT}/tools/bin" + +fetch() { + local tool=$1; shift + local ver=$1; shift + + local arch="$(uname -m)" + case "$arch" in + x86_64) arch="amd64" ;; + aarch64) arch="arm64" ;; + *);; + esac + local os="$(uname -s | awk '{ print tolower($0) }')" + + local ver_cmd="" + local fetch_cmd="" + case "$tool" in + "golangci-lint") + ver_cmd="${DEST}/golangci-lint --version 2>/dev/null | cut -d' ' -f4" + fetch_cmd="curl -sSfL \"https://raw.githubusercontent.com/golangci/golangci-lint/v${ver}/install.sh\" | sh -s -- -b \"${DEST}\" \"v${ver}\"" + ;; + "kind") + ver_cmd="${DEST}/kind --version 2>/dev/null | cut -d' ' -f3" + fetch_cmd="(curl 
-sSfLo '${DEST}/kind' 'https://kind.sigs.k8s.io/dl/v${ver}/kind-${os}-${arch}' && chmod +x ${DEST}/kind)" + ;; + "kubectl") + ver_cmd="${DEST}/kubectl version 2>/dev/null | grep Client | cut -d' ' -f5 | sed 's/\w\+:\"v\(.*\)\",/\1/'" + fetch_cmd="(curl -sSfLo '${DEST}/kubectl' 'https://storage.googleapis.com/kubernetes-release/release/v${ver}/bin/${os}/${arch}/kubectl' && chmod +x ${DEST}/kubectl)" + ;; + "goreleaser") + ver_cmd="${DEST}/goreleaser --version 2>/dev/null | grep version | cut -d' ' -f3" + osCap="$(uname -s)" + archBase="$(uname -m)" + fetch_cmd="(curl -sSfLo '${DEST}/goreleaser.tar.gz' 'https://github.com/goreleaser/goreleaser/releases/download/v${ver}/goreleaser_${osCap}_${archBase}.tar.gz' && tar -xf $DEST/goreleaser.tar.gz -C $DEST)" + ;; + *) + echo "unknown tool $tool" + return 1 + ;; + esac + + if [[ "${ver}" != "$(eval ${ver_cmd})" ]]; then + echo "${tool} missing or not version '${ver}', downloading..." + mkdir -p ${DEST} + eval ${fetch_cmd} + fi +} + +fetch $@ diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..c426f71 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,7 @@ +// +build tools + +package tools + +import ( + _ "github.com/maxbrunsfeld/counterfeiter/v6" +)