Skip to content

Commit

Permalink
Merge pull request #16 from weaveworks/multi-bootstrap-annotation
Browse files Browse the repository at this point in the history
Allow multiple bootstraps with different configs.
  • Loading branch information
bigkevmcd authored Sep 23, 2022
2 parents 5fee3fb + f442912 commit d36def5
Show file tree
Hide file tree
Showing 3 changed files with 175 additions and 8 deletions.
5 changes: 4 additions & 1 deletion api/v1alpha1/clusterbootstrapconfig_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ import (

// defaultWaitDuration is the default wait interval (60s) applied when no
// explicit duration is configured — presumably used as a requeue/backoff
// period elsewhere in this package; usage not visible here, confirm at callers.
const defaultWaitDuration = time.Second * 60

const BootstrappedAnnotation = "capi.weave.works/bootstrapped"
const (
// BootstrappedAnnotation marks a GitopsCluster that has been bootstrapped
// by at least one ClusterBootstrapConfig.
BootstrappedAnnotation = "capi.weave.works/bootstrapped"
// BootstrapConfigsAnnotation records which ClusterBootstrapConfigs have
// bootstrapped a cluster, as a comma-separated list of "namespace/name"
// identifiers, allowing multiple configs to bootstrap the same cluster.
BootstrapConfigsAnnotation = "capi.weave.works/bootstrap-configs"
)

// JobTemplate describes a job to create
type JobTemplate struct {
Expand Down
34 changes: 27 additions & 7 deletions controllers/clusterbootstrapconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

"github.com/fluxcd/pkg/runtime/conditions"
gitopsv1alpha1 "github.com/weaveworks/cluster-controller/api/v1alpha1"
Expand All @@ -29,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/clientcmd"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand Down Expand Up @@ -78,7 +80,7 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
}
logger.Info("cluster bootstrap config loaded", "name", clusterBootstrapConfig.ObjectMeta.Name)

clusters, err := r.getClustersBySelector(ctx, req.Namespace, clusterBootstrapConfig.Spec)
clusters, err := r.getClustersBySelector(ctx, req.Namespace, clusterBootstrapConfig)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to getClustersBySelector for bootstrap config %s: %w", req, err)
}
Expand Down Expand Up @@ -114,7 +116,8 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
mergePatch, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
capiv1alpha1.BootstrappedAnnotation: "yes",
capiv1alpha1.BootstrappedAnnotation: "yes",
capiv1alpha1.BootstrapConfigsAnnotation: appendClusterConfigToBootstrappedList(clusterBootstrapConfig, cluster),
},
},
})
Expand All @@ -128,6 +131,14 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
return ctrl.Result{}, nil
}

// appendClusterConfigToBootstrappedList returns the value for the
// BootstrapConfigsAnnotation on cluster after adding config's
// "namespace/name" identifier to the existing comma-separated list.
// The result is deduplicated and sorted (sets.String.List returns sorted
// order), so re-adding an already-recorded config is a no-op.
func appendClusterConfigToBootstrappedList(config capiv1alpha1.ClusterBootstrapConfig, cluster *gitopsv1alpha1.GitopsCluster) string {
	set := sets.NewString()
	// Only split a non-empty annotation: strings.Split("", ",") yields [""]
	// which would otherwise insert an empty element and produce a value with
	// a spurious leading comma (e.g. ",ns/name") on first bootstrap.
	if current := cluster.GetAnnotations()[capiv1alpha1.BootstrapConfigsAnnotation]; current != "" {
		set.Insert(strings.Split(current, ",")...)
	}
	set.Insert(fmt.Sprintf("%s/%s", config.GetNamespace(), config.GetName()))
	return strings.Join(set.List(), ",")
}

// SetupWithManager sets up the controller with the Manager.
func (r *ClusterBootstrapConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Expand All @@ -139,9 +150,9 @@ func (r *ClusterBootstrapConfigReconciler) SetupWithManager(mgr ctrl.Manager) er
Complete(r)
}

func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Context, ns string, spec capiv1alpha1.ClusterBootstrapConfigSpec) ([]*gitopsv1alpha1.GitopsCluster, error) {
func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Context, ns string, config capiv1alpha1.ClusterBootstrapConfig) ([]*gitopsv1alpha1.GitopsCluster, error) {
logger := ctrl.LoggerFrom(ctx)
selector, err := metav1.LabelSelectorAsSelector(&spec.ClusterSelector)
selector, err := metav1.LabelSelectorAsSelector(&config.Spec.ClusterSelector)
if err != nil {
return nil, fmt.Errorf("unable to convert selector: %w", err)
}
Expand All @@ -160,19 +171,21 @@ func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Con
for i := range clusterList.Items {
cluster := &clusterList.Items[i]

if !conditions.IsReady(cluster) && !spec.RequireClusterProvisioned {
if !conditions.IsReady(cluster) && !config.Spec.RequireClusterProvisioned {
logger.Info("cluster discarded - not ready", "phase", cluster.Status)
continue
}
if spec.RequireClusterProvisioned {
if config.Spec.RequireClusterProvisioned {
if !isProvisioned(cluster) {
logger.Info("waiting for cluster to be provisioned", "cluster", cluster.Name)
continue
}
}

if metav1.HasAnnotation(cluster.ObjectMeta, capiv1alpha1.BootstrappedAnnotation) {
continue
if alreadyBootstrappedWithConfig(cluster, config) {
continue
}
}
if cluster.DeletionTimestamp.IsZero() {
clusters = append(clusters, cluster)
Expand All @@ -181,6 +194,13 @@ func (r *ClusterBootstrapConfigReconciler) getClustersBySelector(ctx context.Con
return clusters, nil
}

// alreadyBootstrappedWithConfig reports whether config's "namespace/name"
// identifier is already present in the cluster's comma-separated
// BootstrapConfigsAnnotation list.
func alreadyBootstrappedWithConfig(cluster *gitopsv1alpha1.GitopsCluster, config capiv1alpha1.ClusterBootstrapConfig) bool {
	id := fmt.Sprintf("%s/%s", config.GetNamespace(), config.GetName())
	annotated := cluster.GetAnnotations()[capiv1alpha1.BootstrapConfigsAnnotation]
	for _, existing := range strings.Split(annotated, ",") {
		if existing == id {
			return true
		}
	}
	return false
}

// clusterToClusterBootstrapConfig is mapper function that maps clusters to
// ClusterBootstrapConfig.
func (r *ClusterBootstrapConfigReconciler) clusterToClusterBootstrapConfig(o client.Object) []ctrl.Request {
Expand Down
144 changes: 144 additions & 0 deletions controllers/clusterbootstrapconfig_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,100 @@ func TestReconcile_when_cluster_ready(t *testing.T) {
}
}

func TestReconcile_when_cluster_ready_bootstrapped_with_same_config(t *testing.T) {
	// A cluster whose BootstrapConfigsAnnotation already names this config
	// must not be bootstrapped again: no jobs should be created.
	bootstrapConfig := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
		c.Spec.RequireClusterReady = true
	})
	controlPlaneNode := makeNode(map[string]string{
		"node-role.kubernetes.io/control-plane": "",
	}, corev1.NodeCondition{
		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})

	cluster := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
		c.ObjectMeta.Labels = bootstrapConfig.Spec.ClusterSelector.MatchLabels
		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
		// Record this exact config as having already bootstrapped the cluster.
		c.ObjectMeta.Annotations = map[string]string{
			capiv1alpha1.BootstrappedAnnotation:     "true",
			capiv1alpha1.BootstrapConfigsAnnotation: fmt.Sprintf("%s/%s", bootstrapConfig.Namespace, bootstrapConfig.Name),
		}
	})
	kubeConfigSecret := makeTestSecret(types.NamespacedName{
		Name:      cluster.GetName() + "-kubeconfig",
		Namespace: cluster.GetNamespace(),
	}, map[string][]byte{"value": []byte("testing")})
	// This cheats by using the local client as the remote client to simplify
	// getting the value from the remote client.
	reconciler := makeTestReconciler(t, bootstrapConfig, cluster, kubeConfigSecret, controlPlaneNode)
	reconciler.configParser = func(b []byte) (client.Client, error) {
		return reconciler.Client, nil
	}

	res, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
		Name:      bootstrapConfig.GetName(),
		Namespace: bootstrapConfig.GetNamespace(),
	}})
	if err != nil {
		t.Fatal(err)
	}
	if !res.IsZero() {
		t.Fatalf("want empty result, got %v", res)
	}

	// No job should have been created for the already-bootstrapped cluster.
	var jobList batchv1.JobList
	if err := reconciler.List(context.TODO(), &jobList, client.InNamespace(testNamespace)); err != nil {
		t.Fatal(err)
	}
	if count := len(jobList.Items); count != 0 {
		t.Fatalf("found %d jobs, want %d", count, 0)
	}
}

func TestReconcile_when_cluster_ready_bootstrapped_with_different_config(t *testing.T) {
	// A cluster previously bootstrapped only by some OTHER config should
	// still be picked up by this config: exactly one job is created.
	bootstrapConfig := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
		c.Spec.RequireClusterReady = true
	})
	controlPlaneNode := makeNode(map[string]string{
		"node-role.kubernetes.io/control-plane": "",
	}, corev1.NodeCondition{
		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})

	cluster := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
		c.ObjectMeta.Labels = bootstrapConfig.Spec.ClusterSelector.MatchLabels
		// The annotation names a different config, so this one still applies.
		c.ObjectMeta.Annotations = map[string]string{
			capiv1alpha1.BootstrappedAnnotation:     "true",
			capiv1alpha1.BootstrapConfigsAnnotation: "unknown/unknown",
		}
		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
	})
	kubeConfigSecret := makeTestSecret(types.NamespacedName{
		Name:      cluster.GetName() + "-kubeconfig",
		Namespace: cluster.GetNamespace(),
	}, map[string][]byte{"value": []byte("testing")})
	// This cheats by using the local client as the remote client to simplify
	// getting the value from the remote client.
	reconciler := makeTestReconciler(t, bootstrapConfig, cluster, kubeConfigSecret, controlPlaneNode)
	reconciler.configParser = func(b []byte) (client.Client, error) {
		return reconciler.Client, nil
	}

	res, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
		Name:      bootstrapConfig.GetName(),
		Namespace: bootstrapConfig.GetNamespace(),
	}})
	if err != nil {
		t.Fatal(err)
	}
	if !res.IsZero() {
		t.Fatalf("want empty result, got %v", res)
	}

	// The new config must have created exactly one bootstrap job.
	var jobList batchv1.JobList
	if err := reconciler.List(context.TODO(), &jobList, client.InNamespace(testNamespace)); err != nil {
		t.Fatal(err)
	}
	if count := len(jobList.Items); count != 1 {
		t.Fatalf("found %d jobs, want %d", count, 1)
	}
}

func TestReconcile_when_cluster_provisioned(t *testing.T) {
bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
c.Spec.RequireClusterProvisioned = true
Expand Down Expand Up @@ -244,6 +338,56 @@ func TestReconcile_when_cluster_no_matching_labels(t *testing.T) {
assertNoJobsCreated(t, reconciler.Client)
}

func TestReconcile_when_cluster_ready_bootstrapped_with_multiple_config(t *testing.T) {
	// Multiple configs can bootstrap the same cluster.
	// If the reconciled cluster is in that list (anywhere) then we don't create
	// jobs.
	bootstrapConfig := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
		c.Spec.RequireClusterReady = true
	})
	controlPlaneNode := makeNode(map[string]string{
		"node-role.kubernetes.io/control-plane": "",
	}, corev1.NodeCondition{
		Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})

	cluster := makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
		c.ObjectMeta.Labels = bootstrapConfig.Spec.ClusterSelector.MatchLabels
		// The annotation lists another config first; ours appears second.
		c.ObjectMeta.Annotations = map[string]string{
			capiv1alpha1.BootstrappedAnnotation:     "true",
			capiv1alpha1.BootstrapConfigsAnnotation: fmt.Sprintf("%s,%s/%s", "unknown/unknown", bootstrapConfig.GetNamespace(), bootstrapConfig.GetName()),
		}
		c.Status.Conditions = append(c.Status.Conditions, makeReadyCondition())
	})
	kubeConfigSecret := makeTestSecret(types.NamespacedName{
		Name:      cluster.GetName() + "-kubeconfig",
		Namespace: cluster.GetNamespace(),
	}, map[string][]byte{"value": []byte("testing")})
	// This cheats by using the local client as the remote client to simplify
	// getting the value from the remote client.
	reconciler := makeTestReconciler(t, bootstrapConfig, cluster, kubeConfigSecret, controlPlaneNode)
	reconciler.configParser = func(b []byte) (client.Client, error) {
		return reconciler.Client, nil
	}

	res, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
		Name:      bootstrapConfig.GetName(),
		Namespace: bootstrapConfig.GetNamespace(),
	}})
	if err != nil {
		t.Fatal(err)
	}
	if !res.IsZero() {
		t.Fatalf("want empty result, got %v", res)
	}

	// Membership anywhere in the list suppresses job creation.
	var jobList batchv1.JobList
	if err := reconciler.List(context.TODO(), &jobList, client.InNamespace(testNamespace)); err != nil {
		t.Fatal(err)
	}
	if count := len(jobList.Items); count != 0 {
		t.Fatalf("found %d jobs, want %d", count, 0)
	}
}

func TestReconcile_when_empty_label_selector(t *testing.T) {
// When the label selector is empty, we don't want any jobs created, rather
// than a job for all clusters.
Expand Down

0 comments on commit d36def5

Please sign in to comment.