Skip to content

Commit

Permalink
Merge pull request #143 from zakkg3/fix-e2e-updateall
Browse files Browse the repository at this point in the history
Fix e2e & updateall
  • Loading branch information
zakkg3 authored Sep 25, 2024
2 parents aa9435f + 92dcb84 commit 71c0d40
Show file tree
Hide file tree
Showing 14 changed files with 473 additions and 44 deletions.
1 change: 1 addition & 0 deletions .github/install_latest_podman.sh
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
#!/bin/bash
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install podman
10 changes: 5 additions & 5 deletions .github/workflows/e2e-testing.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@ jobs:
strategy:
matrix:
kind-node-images:
- kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245
- kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58
- kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
- kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
- kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8
- kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865
- kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114
- kindest/node:v1.29.8@sha256:d46b7aa29567e93b27f7531d258c372e829d7224b25e3fc6ffdefed12476d3aa
- kindest/node:v1.28.13@sha256:45d319897776e11167e4698f6b14938eb4d52eb381d9e3d7a9086c16c69a8110


steps:
- name: Checkout
Expand Down
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,6 @@
src/__pycache__/
src/tests/__pycache__/
yaml/Object_example/debug-*
.vscode
.coverage
lcov.info
8 changes: 6 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
IMG_NAMESPACE = flag5
IMG_NAME = clustersecret
IMG_FQNAME = $(IMG_NAMESPACE)/$(IMG_NAME)
IMG_VERSION = 0.0.10
IMG_VERSION = 0.0.11

.PHONY: container push clean
all: container
Expand Down Expand Up @@ -69,5 +69,9 @@ chart-update:
helm package charts/cluster-secret/ -d docs/
helm repo index ./docs

dev-run:
dev-prepare:
kubectl apply -f ./yaml/00_rbac.yaml
kubectl apply -f ./yaml/01_crd.yaml

dev-run: dev-prepare
kopf run ./src/handlers.py --verbose -A
2 changes: 0 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@
## Kubernetes ClusterSecret
[*clustersecret.com*](https://clustersecret.com/)

# note clustersecret.io domain is deprecated. use clustersecret.com from now on.

Cluster wide secrets

ClusterSecret operator makes sure all the matching namespaces have the secret available and up to date.
Expand Down
6 changes: 3 additions & 3 deletions charts/cluster-secret/Chart.yaml
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
apiVersion: v2
name: cluster-secret
description: ClusterSecret Operator
kubeVersion: '>= 1.16.0-0'
kubeVersion: '>= 1.25.0-0'
type: application
version: 0.4.2
version: 0.4.3
icon: https://clustersecret.com/assets/csninjasmall.png
sources:
- https://github.com/zakkg3/ClusterSecret
appVersion: "0.0.10"
appVersion: "0.0.11"
maintainers:
- email: [email protected]
name: zakkg3
2 changes: 1 addition & 1 deletion charts/cluster-secret/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ Clustersecrets automates this. It keeps track of any modification in your secret

## Requirements

Current is 0.0.10 tested on > 1.27.1
Current is 0.0.11 tested on > 1.27.1
Version 0.0.9 is tested for Kubernetes >= 1.19 up to 1.27.1

For older Kubernetes (<1.19) use the image tag "0.0.6" in yaml/02_deployment.yaml
Expand Down
6 changes: 5 additions & 1 deletion charts/cluster-secret/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@ spec:
labels:
app: clustersecret
{{- include "cluster-secret.selectorLabels" . | nindent 8 }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }}
{{- end }}
spec:
securityContext:
runAsUser: 100 # 100 is set by the container and can NOT be changed here - this would result in a getpwuid() error
Expand Down Expand Up @@ -59,4 +63,4 @@ spec:
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
5 changes: 4 additions & 1 deletion charts/cluster-secret/values.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
imagePullSecrets: []
image:
repository: quay.io/clustersecret/clustersecret
tag: 0.0.10
tag: 0.0.11
# use tag-alt for ARM and other alternative builds - read the readme for more information
# If Clustersecret is about to create a secret and then it founds it exists:
# Default is to ignore it. (to not lose any unintentional data)
Expand All @@ -15,3 +15,6 @@ nodeSelector: {}
tolerations: []

affinity: {}

# Additional Pod annotations
podAnnotations: {}
90 changes: 65 additions & 25 deletions src/handlers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging
import sys
from typing import Any, Dict, List, Optional

import kopf
Expand All @@ -14,15 +15,14 @@

from os_utils import in_cluster

csecs: Dict[str, Any] = {}

# Loading kubeconfig
if in_cluster():
if "unittest" not in sys.modules:
# Loading kubeconfig
config.load_incluster_config()
else:
# Loading using the local kubeconfig.
config.load_kube_config()
if in_cluster():
# Loading kubeconfig
config.load_incluster_config()
else:
# Loading using the local kubeconfig.
config.load_kube_config()

v1 = client.CoreV1Api()
custom_objects_api = client.CustomObjectsApi()
Expand Down Expand Up @@ -92,7 +92,7 @@ def on_field_match_namespace(
uid=uid,
name=name,
namespace=namespace,
data=body.get('data'),
body=body,
synced_namespace=updated_matched,
))

Expand All @@ -113,6 +113,8 @@ def on_field_data(
body: Dict[str, Any],
meta: kopf.Meta,
name: str,
namespace: Optional[str],
uid: str,
logger: logging.Logger,
**_,
):
Expand All @@ -126,9 +128,14 @@ def on_field_data(

secret_type = body.get('type', 'Opaque')

cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
if cached_cluster_secret is None:
logger.error('Received an event for an unknown ClusterSecret.')

updated_syncedns = syncedns.copy()
for ns in syncedns:
logger.info(f'Re Syncing secret {name} in ns {ns}')
body = client.V1Secret(
ns_sec_body = client.V1Secret(
api_version='v1',
data={str(key): str(value) for key, value in new.items()},
kind='Secret',
Expand All @@ -140,14 +147,42 @@ def on_field_data(
),
type=secret_type,
)
logger.debug(f'body: {body}')
logger.debug(f'body: {ns_sec_body}')
# Ensuring the secret still exist.
if secret_exists(logger=logger, name=name, namespace=ns, v1=v1):
response = v1.replace_namespaced_secret(name=name, namespace=ns, body=body)
response = v1.replace_namespaced_secret(name=name, namespace=ns, body=ns_sec_body)
else:
response = v1.create_namespaced_secret(namespace=ns, body=body)
try:
v1.read_namespace(name=ns)
except client.exceptions.ApiException as e:
if e.status != 404:
raise
response = f'Namespace {ns} not found'
updated_syncedns.remove(ns)
logger.info(f'Namespace {ns} not found while Syncing secret {name}')
else:
response = v1.create_namespaced_secret(namespace=ns, body=ns_sec_body)
logger.debug(response)

if updated_syncedns != syncedns:
# Patch synced_ns field
logger.debug(f'Patching clustersecret {name} in namespace {namespace}')
body = patch_clustersecret_status(
logger=logger,
name=name,
new_status={'create_fn': {'syncedns': updated_syncedns}},
custom_objects_api=custom_objects_api,
)

# Updating the cache
csecs_cache.set_cluster_secret(BaseClusterSecret(
uid=uid,
name=name,
namespace=namespace or "",
body=body,
synced_namespace=updated_syncedns,
))


@kopf.on.resume('clustersecret.io', 'v1', 'clustersecrets')
@kopf.on.create('clustersecret.io', 'v1', 'clustersecrets')
Expand All @@ -164,8 +199,8 @@ async def create_fn(

# sync in all matched NS
logger.info(f'Syncing on Namespaces: {matchedns}')
for namespace in matchedns:
sync_secret(logger, namespace, body, v1)
for ns in matchedns:
sync_secret(logger, ns, body, v1)

# store status in memory
cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
Expand All @@ -176,8 +211,8 @@ async def create_fn(
csecs_cache.set_cluster_secret(BaseClusterSecret(
uid=uid,
name=name,
namespace=namespace,
data=body.get('data'),
namespace=namespace or "",
body=body,
synced_namespace=matchedns,
))

Expand All @@ -193,10 +228,10 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
logger.debug(f'New namespace created: {new_ns} re-syncing')
ns_new_list = []
for cluster_secret in csecs_cache.all_cluster_secret():
obj_body = cluster_secret['body']
name = obj_body['metadata']['name']
obj_body = cluster_secret.body
name = cluster_secret.name

matcheddns = cluster_secret['syncedns']
matcheddns = cluster_secret.synced_namespace

logger.debug(f'Old matched namespace: {matcheddns} - name: {name}')
ns_new_list = get_ns_list(logger, obj_body, v1)
Expand All @@ -211,11 +246,16 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
)

# if there is a new matching ns, refresh cache
cluster_secret.namespace = ns_new_list
cluster_secret.synced_namespace = ns_new_list
csecs_cache.set_cluster_secret(cluster_secret)

# update ns_new_list on the object so then we also delete from there
return {'syncedns': ns_new_list}
# update ns_new_list on the object so then we also delete from there
patch_clustersecret_status(
logger=logger,
name=cluster_secret.name,
new_status={'create_fn': {'syncedns': ns_new_list}},
custom_objects_api=custom_objects_api,
)


@kopf.on.startup()
Expand Down Expand Up @@ -243,8 +283,8 @@ async def startup_fn(logger: logging.Logger, **_):
BaseClusterSecret(
uid=metadata.get('uid'),
name=metadata.get('name'),
namespace=metadata.get('namespace'),
data=item.get('data'),
namespace=metadata.get('namespace', ''),
body=item,
synced_namespace=item.get('status', {}).get('create_fn', {}).get('syncedns', []),
)
)
2 changes: 1 addition & 1 deletion src/kubernetes_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def patch_clustersecret_status(
logger.debug(f'Updated clustersecret manifest: {clustersecret}')

# Perform a patch operation to update the custom resource
custom_objects_api.patch_cluster_custom_object(
return custom_objects_api.patch_cluster_custom_object(
group=group,
version=version,
plural=plural,
Expand Down
2 changes: 1 addition & 1 deletion src/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,5 @@ class BaseClusterSecret(BaseModel):
uid: str
name: str
namespace: str
data: Dict[str, Any]
body: Dict[str, Any]
synced_namespace: List[str]
4 changes: 2 additions & 2 deletions src/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
kopf===1.35.3
kopf===1.37.2
kubernetes===19.15.0
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
pydantic==2.3.0
pydantic==2.4.0
Loading

0 comments on commit 71c0d40

Please sign in to comment.