From 35d14dec67765b2466daee5f3fff3680bdc483d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Volkan=20=C3=96z=C3=A7elik?=
Date: Sat, 15 Jun 2024 16:08:53 +0300
Subject: [PATCH] Minor fixes in makefiles (#1000)

* Rephrase warning message

When the API requested a non-existent secret, the error message we got
was too alarming and confusing. This commit softens that message and
provides additional context.

Signed-off-by: Volkan Ozcelik

* Fix typo

Signed-off-by: Volkan Ozcelik

* Fix minor error

Signed-off-by: Volkan Ozcelik

* Increase timeouts from 60s to 120s

Signed-off-by: Volkan Ozcelik

---------

Signed-off-by: Volkan Ozcelik
---
 app/safe/internal/server/route/fetch/fetch.go |  3 +-
 env-vars-export.txt                           | 66 -------------------
 hack/helm-delete.sh                           |  4 +-
 hack/install-vsecm-to-eks.sh                  |  4 +-
 makefiles/VSecMDeploy.mk                      |  6 +-
 5 files changed, 9 insertions(+), 74 deletions(-)
 delete mode 100644 env-vars-export.txt

diff --git a/app/safe/internal/server/route/fetch/fetch.go b/app/safe/internal/server/route/fetch/fetch.go
index c4b14602..fa935675 100644
--- a/app/safe/internal/server/route/fetch/fetch.go
+++ b/app/safe/internal/server/route/fetch/fetch.go
@@ -93,7 +93,8 @@ func Fetch(
 
 	secret, err := collection.ReadSecret(cid, workloadId)
 	if err != nil {
-		log.WarnLn(&cid, "Fetch: Problem reading secret", err.Error())
+		log.WarnLn(&cid, "Fetch: Attempted to read secret from disk.")
+		log.TraceLn(&cid, "Likely expected error. No need to panic:", err.Error())
 	}
 
 	log.TraceLn(&cid, "Fetch: workloadId", workloadId)
diff --git a/env-vars-export.txt b/env-vars-export.txt
deleted file mode 100644
index 57cfa978..00000000
--- a/env-vars-export.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-# /*
-# |    Protect your secrets, protect your sensitive data.
-# :    Explore VMware Secrets Manager docs at https://vsecm.com/
-# </
-# <>/  keep your secrets... secret
-# >/
-# <>/' Copyright 2023-present VMware Secrets Manager contributors.
-# >/'  SPDX-License-Identifier: BSD-2-Clause
-# */
-
-# This is the output of ./hack/list-env-vars.sh:
-
-SPIFFE_ENDPOINT_SOCKET
-SPIFFE_TRUST_DOMAIN
-VSECM_BACKOFF_DELAY
-VSECM_BACKOFF_MAX_RETRIES
-VSECM_BACKOFF_MAX_WAIT
-VSECM_BACKOFF_MODE
-VSECM_INIT_CONTAINER_POLL_INTERVAL
-VSECM_INIT_CONTAINER_WAIT_BEFORE_EXIT
-VSECM_KEYGEN_DECRYPT
-VSECM_KEYGEN_EXPORTED_SECRET_PATH
-VSECM_KEYGEN_ROOT_KEY_PATH
-VSECM_LOG_LEVEL
-VSECM_LOG_SECRET_FINGERPRINTS
-VSECM_NAMESPACE_SYSTEM
-VSECM_PROBE_LIVENESS_PORT
-VSECM_PROBE_READINESS_PORT
-VSECM_ROOT_KEY_INPUT_MODE_MANUAL
-VSECM_ROOT_KEY_NAME
-VSECM_ROOT_KEY_PATH
-VSECM_SAFE_BACKING_STORE
-VSECM_SAFE_BOOTSTRAP_TIMEOUT
-VSECM_SAFE_DATA_PATH
-VSECM_SAFE_ENDPOINT_URL
-VSECM_SAFE_FIPS_COMPLIANT
-VSECM_SAFE_IV_INITIALIZATION_INTERVAL
-VSECM_SAFE_K8S_SECRET_BUFFER_SIZE
-VSECM_SAFE_ROOT_KEY_STORE
-VSECM_SAFE_SECRET_BACKUP_COUNT
-VSECM_SAFE_SECRET_BUFFER_SIZE
-VSECM_SAFE_SECRET_DELETE_BUFFER_SIZE
-VSECM_SAFE_SOURCE_ACQUISITION_TIMEOUT
-VSECM_SPIFFEID_PREFIX_SAFE
-VSECM_SAFE_STORE_WORKLOAD_SECRET_AS_K8S_SECRET_PREFIX
-VSECM_SAFE_SYNC_DELETED_SECRETS
-VSECM_SAFE_SYNC_EXPIRED_SECRETS
-VSECM_SAFE_SYNC_INTERPOLATED_K8S_SECRETS
-VSECM_SAFE_SYNC_ROOT_KEY_INTERVAL
-VSECM_SAFE_SYNC_SECRETS_INTERVAL
-VSECM_SAFE_TLS_PORT
-VSECM_SENTINEL_ENABLE_OIDC_RESOURCE_SERVER
-VSECM_SENTINEL_INIT_COMMAND_PATH
-VSECM_SENTINEL_INIT_COMMAND_WAIT_AFTER_INIT_COMPLETE
-VSECM_SENTINEL_INIT_COMMAND_WAIT_BEFORE_EXEC
-VSECM_SENTINEL_LOGGER_URL
-VSECM_SENTINEL_OIDC_PROVIDER_BASE_URL
-VSECM_SENTINEL_SECRET_GENERATION_PREFIX
-VSECM_SPIFFEID_PREFIX_SENTINEL
-VSECM_SIDECAR_ERROR_THRESHOLD
-VSECM_SIDECAR_EXPONENTIAL_BACKOFF_MULTIPLIER
-VSECM_SIDECAR_MAX_POLL_INTERVAL
-VSECM_SIDECAR_POLL_INTERVAL
-VSECM_SIDECAR_SECRETS_PATH
-VSECM_SIDECAR_SUCCESS_THRESHOLD
-VSECM_SPIFFEID_PREFIX_WORKLOAD
diff --git a/hack/helm-delete.sh b/hack/helm-delete.sh
index be2998a5..0b692c59 100755
--- a/hack/helm-delete.sh
+++ b/hack/helm-delete.sh
@@ -38,7 +38,7 @@ check_namespace_deleted() {
 if kubectl get deployment vsecm-sentinel -n "$VSECM_NS"; then
   kubectl delete deployment vsecm-sentinel -n "$VSECM_NS" || \
     { echo "Failed to delete vsecm-sentinel deployment"; exit 1; }
-  kubectl wait --for=delete pod -l app=vsecm-sentinel -n "$VSECM_NS" --timeout=60s || \
+  kubectl wait --for=delete pod -l app=vsecm-sentinel -n "$VSECM_NS" --timeout=120s || \
     { echo "Timeout or error while waiting for vsecm-sentinel pods to delete"; exit 1; }
 else
   echo "vsecm-sentinel deployment does not exist. Skipping delete."
@@ -47,7 +47,7 @@ fi
 if kubectl get deployment vsecm-safe -n "$VSECM_NS"; then
   kubectl delete deployment vsecm-safe -n "$VSECM_NS" || \
     { echo "Failed to delete vsecm-safe deployment"; exit 1; }
-  kubectl wait --for=delete pod -l app=vsecm-safe -n "$VSECM_NS" --timeout=60s || \
+  kubectl wait --for=delete pod -l app=vsecm-safe -n "$VSECM_NS" --timeout=120s || \
     { echo "Timeout or error while waiting for vsecm-safe pods to delete"; exit 1; }
 else
   echo "vsecm-safe deployment does not exist. Skipping delete."
diff --git a/hack/install-vsecm-to-eks.sh b/hack/install-vsecm-to-eks.sh
index d4be7feb..25f06651 100755
--- a/hack/install-vsecm-to-eks.sh
+++ b/hack/install-vsecm-to-eks.sh
@@ -20,8 +20,8 @@ helm install vsecm vsecm/vsecm
 
 echo "verifying vsecm installation"
-kubectl wait --timeout=60s --for=condition=Available deployment -n vsecm-system vsecm-sentinel
+kubectl wait --timeout=120s --for=condition=Available deployment -n vsecm-system vsecm-sentinel
 echo "vsecm-sentinel: deployment available"
-kubectl wait --timeout=60s --for=condition=Available deployment -n vsecm-system vsecm-safe
+kubectl wait --timeout=120s --for=condition=Available deployment -n vsecm-system vsecm-safe
 echo "vsecm-safe: deployment available"
 echo "vsecm installation successful"
 
diff --git a/makefiles/VSecMDeploy.mk b/makefiles/VSecMDeploy.mk
index aa9ba7bd..6de0b7a4 100644
--- a/makefiles/VSecMDeploy.mk
+++ b/makefiles/VSecMDeploy.mk
@@ -40,7 +40,7 @@ deploy-spire:
 	kubectl apply -f ${MANIFESTS_BASE_PATH}/crds; \
 	kubectl apply -f ${MANIFESTS_BASE_PATH}/spire.yaml; \
 	echo "verifying SPIRE installation"; \
-	kubectl wait --for=condition=ready pod spire-server-0 --timeout=60s -n $(VSECM_NAMESPACE_SPIRE_SERVER) \
+	kubectl wait --for=condition=ready pod spire-server-0 --timeout=120s -n $(VSECM_NAMESPACE_SPIRE_SERVER); \
 	echo "spire-server: deployment available"; \
 	echo "spire installation successful"; \
 	echo "sleeping for 15 seconds for webhooks to become responsive"; \
@@ -96,9 +96,9 @@ deploy-fips-eks: deploy-spire
 .PHONY: post-deploy
 post-deploy:
 	echo "verifying vsecm installation"
-	kubectl wait --timeout=60s --for=condition=Available deployment -n $(VSECM_NAMESPACE_SYSTEM) vsecm-sentinel
+	kubectl wait --timeout=120s --for=condition=Available deployment -n $(VSECM_NAMESPACE_SYSTEM) vsecm-sentinel
 	echo "vsecm-sentinel: deployment available"
-	kubectl wait --for=condition=ready pod vsecm-safe-0 --timeout=60s -n $(VSECM_NAMESPACE_SYSTEM)
+	kubectl wait --for=condition=ready pod vsecm-safe-0 --timeout=120s -n $(VSECM_NAMESPACE_SYSTEM)
 	echo "vsecm-safe: deployment available"
 	echo "vsecm installation successful"
 
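
As a quick local sanity check, the patch above can be applied and one of the
lengthened waits exercised by hand. This is only a sketch: the patch file name
below is illustrative (git format-patch derives file names from the subject
line), and it assumes a cluster where VSecM is installed into the vsecm-system
namespace, as in the scripts touched above.

# Apply the mailbox-format patch to a local clone of the repository.
git am 0001-Minor-fixes-in-makefiles-1000.patch

# The deploy and verify steps now allow up to 120 seconds; the same check
# can be run manually against the cluster.
kubectl wait --timeout=120s --for=condition=Available \
  deployment -n vsecm-system vsecm-sentinel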