From f4429126d25f1e2c90f2731255df98ad8c1bd222 Mon Sep 17 00:00:00 2001
From: Kevin McDermott
Date: Fri, 23 Sep 2022 10:32:34 +0100
Subject: [PATCH] Allow multiple bootstraps with different configs.

This allows multiple bootstrap configs to trigger for the same cluster.
The namespace/name of each ClusterBootstrapConfig (CBC) that has acted
is recorded as a comma-separated list in an annotation on the
GitopsCluster, so a config that has already bootstrapped a cluster is
not run against it again.

Co-authored-by: Simon
---
 api/v1alpha1/clusterbootstrapconfig_types.go  |   5 +-
 .../clusterbootstrapconfig_controller.go      |  34 ++++-
 .../clusterbootstrapconfig_controller_test.go | 144 ++++++++++++++++++
 3 files changed, 175 insertions(+), 8 deletions(-)

diff --git a/api/v1alpha1/clusterbootstrapconfig_types.go b/api/v1alpha1/clusterbootstrapconfig_types.go
index f52ed68..8588a91 100644
--- a/api/v1alpha1/clusterbootstrapconfig_types.go
+++ b/api/v1alpha1/clusterbootstrapconfig_types.go
@@ -25,7 +25,10 @@ import (
 
 const defaultWaitDuration = time.Second * 60
 
-const BootstrappedAnnotation = "capi.weave.works/bootstrapped"
+const (
+	BootstrappedAnnotation     = "capi.weave.works/bootstrapped"
+	BootstrapConfigsAnnotation = "capi.weave.works/bootstrap-configs"
+)
 
 // JobTemplate describes a job to create
 type JobTemplate struct {
diff --git a/controllers/clusterbootstrapconfig_controller.go b/controllers/clusterbootstrapconfig_controller.go
index 320178e..1d0387a 100644
--- a/controllers/clusterbootstrapconfig_controller.go
+++ b/controllers/clusterbootstrapconfig_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	"github.com/fluxcd/pkg/runtime/conditions"
 	gitopsv1alpha1 "github.com/weaveworks/cluster-controller/api/v1alpha1"
@@ -29,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/tools/clientcmd"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -78,7 +80,7 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
 	}
 	logger.Info("cluster bootstrap config loaded", "name", clusterBootstrapConfig.ObjectMeta.Name)
 
-	clusters, err := r.getClustersBySelector(ctx, req.Namespace, clusterBootstrapConfig.Spec)
+	clusters, err := r.getClustersBySelector(ctx, req.Namespace, clusterBootstrapConfig)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to getClustersBySelector for bootstrap config %s: %w", req, err)
 	}
@@ -114,7 +116,8 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
 			mergePatch, err := json.Marshal(map[string]interface{}{
 				"metadata": map[string]interface{}{
 					"annotations": map[string]interface{}{
-						capiv1alpha1.BootstrappedAnnotation: "yes",
+						capiv1alpha1.BootstrappedAnnotation:     "yes",
+						capiv1alpha1.BootstrapConfigsAnnotation: appendClusterConfigToBootstrappedList(clusterBootstrapConfig, cluster),
 					},
 				},
 			})
@@ -128,6 +131,14 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
 	return ctrl.Result{}, nil
 }
 
+func appendClusterConfigToBootstrappedList(config capiv1alpha1.ClusterBootstrapConfig, cluster *gitopsv1alpha1.GitopsCluster) string {
+	current := cluster.GetAnnotations()[capiv1alpha1.BootstrapConfigsAnnotation]
+	set := sets.NewString(strings.FieldsFunc(current, func(r rune) bool { return r == ',' })...) // FieldsFunc skips the empty element that Split would yield for an unset annotation
+	id := fmt.Sprintf("%s/%s", config.GetNamespace(), config.GetName())
+	set.Insert(id)
+	return strings.Join(set.List(), ",")
+}
+
 // SetupWithManager sets up the controller with the Manager.
 func (r *ClusterBootstrapConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
@@ -139,9 +150,9 @@ func (r *ClusterBootstrapConfigReconciler) SetupWithManager(mgr ctrl.Manager) er
 		Complete(r)
 }
 
-func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Context, ns string, spec capiv1alpha1.ClusterBootstrapConfigSpec) ([]*gitopsv1alpha1.GitopsCluster, error) {
+func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Context, ns string, config capiv1alpha1.ClusterBootstrapConfig) ([]*gitopsv1alpha1.GitopsCluster, error) {
 	logger := ctrl.LoggerFrom(ctx)
-	selector, err := metav1.LabelSelectorAsSelector(&spec.ClusterSelector)
+	selector, err := metav1.LabelSelectorAsSelector(&config.Spec.ClusterSelector)
 	if err != nil {
 		return nil, fmt.Errorf("unable to convert selector: %w", err)
 	}
@@ -160,11 +171,11 @@ func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Con
 
 	for i := range clusterList.Items {
 		cluster := &clusterList.Items[i]
-		if !conditions.IsReady(cluster) && !spec.RequireClusterProvisioned {
+		if !conditions.IsReady(cluster) && !config.Spec.RequireClusterProvisioned {
 			logger.Info("cluster discarded - not ready", "phase", cluster.Status)
 			continue
 		}
-		if spec.RequireClusterProvisioned {
+		if config.Spec.RequireClusterProvisioned {
 			if !isProvisioned(cluster) {
 				logger.Info("waiting for cluster to be provisioned", "cluster", cluster.Name)
 				continue
@@ -172,7 +183,9 @@ func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Con
 			}
 		}
 		if metav1.HasAnnotation(cluster.ObjectMeta, capiv1alpha1.BootstrappedAnnotation) {
-			continue
+			if alreadyBootstrappedWithConfig(cluster, config) {
+				continue
+			}
 		}
 		if cluster.DeletionTimestamp.IsZero() {
 			clusters = append(clusters, cluster)
@@ -181,6 +194,13 @@ func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Con
 	return clusters, nil
 }
 
+func alreadyBootstrappedWithConfig(cluster *gitopsv1alpha1.GitopsCluster, config capiv1alpha1.ClusterBootstrapConfig) bool {
+	current := cluster.GetAnnotations()[capiv1alpha1.BootstrapConfigsAnnotation]
+	set := sets.NewString(strings.Split(current, ",")...)
+	id := fmt.Sprintf("%s/%s", config.GetNamespace(), config.GetName())
+	return set.Has(id)
+}
+
 // clusterToClusterBootstrapConfig is mapper function that maps clusters to
 // ClusterBootstrapConfig.
 func (r *ClusterBootstrapConfigReconciler) clusterToClusterBootstrapConfig(o client.Object) []ctrl.Request {
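The controller-side bookkeeping above reduces to a round-trip over a sorted,
comma-separated set. The following standalone sketch shows the intended
behaviour; the helper names (addConfig, hasConfig) and the team-a/team-b ids
are illustrative only, not part of the patch, and nothing beyond the
k8s.io/apimachinery sets package is assumed:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// addConfig records a config's namespace/name id in the comma-separated
// set, mirroring appendClusterConfigToBootstrappedList.
func addConfig(current, id string) string {
	// FieldsFunc, unlike Split, yields no empty element when current is "".
	set := sets.NewString(strings.FieldsFunc(current, func(r rune) bool { return r == ',' })...)
	set.Insert(id)
	// List returns the members sorted, so the stored value is stable.
	return strings.Join(set.List(), ",")
}

// hasConfig reports whether an id is already recorded, mirroring
// alreadyBootstrappedWithConfig.
func hasConfig(current, id string) bool {
	return sets.NewString(strings.Split(current, ",")...).Has(id)
}

func main() {
	v := addConfig("", "team-a/bootstrap")
	v = addConfig(v, "team-b/bootstrap")
	fmt.Println(v)                                // team-a/bootstrap,team-b/bootstrap
	fmt.Println(hasConfig(v, "team-a/bootstrap")) // true
	fmt.Println(hasConfig(v, "team-c/other"))     // false
	fmt.Println(addConfig(v, "team-a/bootstrap")) // unchanged; inserting an existing id is a no-op
}

Because the members are sorted before joining, repeated reconciles produce
the same annotation value regardless of the order in which configs act on
the cluster.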
diff --git a/controllers/clusterbootstrapconfig_controller_test.go b/controllers/clusterbootstrapconfig_controller_test.go
index 95dc7fe..3de6ff2 100644
--- a/controllers/clusterbootstrapconfig_controller_test.go
+++ b/controllers/clusterbootstrapconfig_controller_test.go
@@ -138,6 +138,100 @@ func TestReconcile_when_cluster_ready(t *testing.T) {
 	}
 }
 
+func TestReconcile_when_cluster_ready_bootstrapped_with_same_config(t *testing.T) {
+	bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
+		c.Spec.RequireClusterReady = true
+	})
+	readyNode := makeNode(map[string]string{
+		"node-role.kubernetes.io/control-plane": "",
+	}, corev1.NodeCondition{
+		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})
+
+	cl := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
+		c.ObjectMeta.Labels = bc.Spec.ClusterSelector.MatchLabels
+		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
+		c.ObjectMeta.Annotations = map[string]string{
+			capiv1alpha1.BootstrappedAnnotation:     "true",
+			capiv1alpha1.BootstrapConfigsAnnotation: fmt.Sprintf("%s/%s", bc.Namespace, bc.Name),
+		}
+	})
+	secret := makeTestSecret(types.NamespacedName{
+		Name:      cl.GetName() + "-kubeconfig",
+		Namespace: cl.GetNamespace(),
+	}, map[string][]byte{"value": []byte("testing")})
+	// This cheats by using the local client as the remote client to simplify
+	// getting the value from the remote client.
+	reconciler := makeTestReconciler(t, bc, cl, secret, readyNode)
+	reconciler.configParser = func(b []byte) (client.Client, error) {
+		return reconciler.Client, nil
+	}
+
+	result, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
+		Name:      bc.GetName(),
+		Namespace: bc.GetNamespace(),
+	}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !result.IsZero() {
+		t.Fatalf("want empty result, got %v", result)
+	}
+	var jobs batchv1.JobList
+	if err := reconciler.List(context.TODO(), &jobs, client.InNamespace(testNamespace)); err != nil {
+		t.Fatal(err)
+	}
+	if l := len(jobs.Items); l != 0 {
+		t.Fatalf("found %d jobs, want %d", l, 0)
+	}
+}
+
+func TestReconcile_when_cluster_ready_bootstrapped_with_different_config(t *testing.T) {
+	bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
+		c.Spec.RequireClusterReady = true
+	})
+	readyNode := makeNode(map[string]string{
+		"node-role.kubernetes.io/control-plane": "",
+	}, corev1.NodeCondition{
+		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})
+
+	cl := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
+		c.ObjectMeta.Labels = bc.Spec.ClusterSelector.MatchLabels
+		c.ObjectMeta.Annotations = map[string]string{
+			capiv1alpha1.BootstrappedAnnotation:     "true",
+			capiv1alpha1.BootstrapConfigsAnnotation: "unknown/unknown",
+		}
+		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
+	})
+	secret := makeTestSecret(types.NamespacedName{
+		Name:      cl.GetName() + "-kubeconfig",
+		Namespace: cl.GetNamespace(),
+	}, map[string][]byte{"value": []byte("testing")})
+	// This cheats by using the local client as the remote client to simplify
+	// getting the value from the remote client.
+	reconciler := makeTestReconciler(t, bc, cl, secret, readyNode)
+	reconciler.configParser = func(b []byte) (client.Client, error) {
+		return reconciler.Client, nil
+	}
+
+	result, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
+		Name:      bc.GetName(),
+		Namespace: bc.GetNamespace(),
+	}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !result.IsZero() {
+		t.Fatalf("want empty result, got %v", result)
+	}
+	var jobs batchv1.JobList
+	if err := reconciler.List(context.TODO(), &jobs, client.InNamespace(testNamespace)); err != nil {
+		t.Fatal(err)
+	}
+	if l := len(jobs.Items); l != 1 {
+		t.Fatalf("found %d jobs, want %d", l, 1)
+	}
+}
+
 func TestReconcile_when_cluster_provisioned(t *testing.T) {
 	bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
 		c.Spec.RequireClusterProvisioned = true
@@ -244,6 +338,56 @@ func TestReconcile_when_cluster_no_matching_labels(t *testing.T) {
 	assertNoJobsCreated(t, reconciler.Client)
 }
 
+func TestReconcile_when_cluster_ready_bootstrapped_with_multiple_configs(t *testing.T) {
+	// Multiple configs can bootstrap the same cluster. If the reconciling
+	// config is already recorded in the cluster's bootstrap-configs
+	// annotation (anywhere in the list), we don't create jobs.
+	bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
+		c.Spec.RequireClusterReady = true
+	})
+	readyNode := makeNode(map[string]string{
+		"node-role.kubernetes.io/control-plane": "",
+	}, corev1.NodeCondition{
+		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})
+
+	cl := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
+		c.ObjectMeta.Labels = bc.Spec.ClusterSelector.MatchLabels
+		c.ObjectMeta.Annotations = map[string]string{
+			capiv1alpha1.BootstrappedAnnotation:     "true",
+			capiv1alpha1.BootstrapConfigsAnnotation: fmt.Sprintf("%s,%s/%s", "unknown/unknown", bc.GetNamespace(), bc.GetName()),
+		}
+		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
+	})
+	secret := makeTestSecret(types.NamespacedName{
+		Name:      cl.GetName() + "-kubeconfig",
+		Namespace: cl.GetNamespace(),
+	}, map[string][]byte{"value": []byte("testing")})
+	// This cheats by using the local client as the remote client to simplify
+	// getting the value from the remote client.
+	reconciler := makeTestReconciler(t, bc, cl, secret, readyNode)
+	reconciler.configParser = func(b []byte) (client.Client, error) {
+		return reconciler.Client, nil
+	}
+
+	result, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
+		Name:      bc.GetName(),
+		Namespace: bc.GetNamespace(),
+	}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !result.IsZero() {
+		t.Fatalf("want empty result, got %v", result)
+	}
+	var jobs batchv1.JobList
+	if err := reconciler.List(context.TODO(), &jobs, client.InNamespace(testNamespace)); err != nil {
+		t.Fatal(err)
+	}
+	if l := len(jobs.Items); l != 0 {
+		t.Fatalf("found %d jobs, want %d", l, 0)
+	}
+}
+
 func TestReconcile_when_empty_label_selector(t *testing.T) {
 	// When the label selector is empty, we don't want any jobs created, rather
 	// than a job for all clusters.