Skip to content

Commit

Permalink
Add cluster controller test
Browse files Browse the repository at this point in the history
  • Loading branch information
willie-yao committed Sep 11, 2023
1 parent 2cf61ef commit 60d9728
Show file tree
Hide file tree
Showing 4 changed files with 187 additions and 26 deletions.
171 changes: 165 additions & 6 deletions internal/controllers/topology/cluster/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
Expand All @@ -50,16 +51,19 @@ import (
)

var (
clusterName1 = "cluster1"
clusterName2 = "cluster2"
clusterClassName1 = "class1"
clusterClassName2 = "class2"
infrastructureMachineTemplateName1 = "inframachinetemplate1"
infrastructureMachineTemplateName2 = "inframachinetemplate2"
clusterName1 = "cluster1"
clusterName2 = "cluster2"
clusterClassName1 = "class1"
clusterClassName2 = "class2"
infrastructureMachineTemplateName1 = "inframachinetemplate1"
infrastructureMachineTemplateName2 = "inframachinetemplate2"
infrastructureMachinePoolTemplateName1 = "inframachinepooltemplate1"
infrastructureMachinePoolTemplateName2 = "inframachinepooltemplate2"
)

func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second

Expand Down Expand Up @@ -96,6 +100,9 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())

Expand All @@ -105,6 +112,8 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {

func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

g := NewWithT(t)
timeout := 5 * time.Second

Expand Down Expand Up @@ -144,6 +153,9 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
}
Expand All @@ -153,6 +165,7 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {

func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 300 * time.Second

Expand Down Expand Up @@ -190,6 +203,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
return nil
Expand Down Expand Up @@ -226,6 +242,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
return nil
Expand All @@ -234,6 +253,7 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {

func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second

Expand Down Expand Up @@ -273,6 +293,9 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
}
Expand Down Expand Up @@ -316,6 +339,9 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

// Check if the Cluster has the relevant TopologyReconciledCondition.
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
}
Expand All @@ -325,6 +351,7 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {

func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 30 * time.Second

Expand Down Expand Up @@ -362,6 +389,10 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {

// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

return nil
}, timeout).Should(Succeed())

Expand Down Expand Up @@ -392,12 +423,17 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {

// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())

return nil
}, timeout).Should(Succeed())
}

func TestClusterReconciler_reconcileDelete(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

catalog := runtimecatalog.New()
_ = runtimehooksv1.AddToCatalog(catalog)
Expand Down Expand Up @@ -551,6 +587,7 @@ func TestClusterReconciler_reconcileDelete(t *testing.T) {
// In this case deletion of the ClusterClass should be blocked by the webhook.
func TestClusterReconciler_deleteClusterClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second

Expand Down Expand Up @@ -588,6 +625,9 @@ func TestClusterReconciler_deleteClusterClass(t *testing.T) {

// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())
}
return nil
}, timeout).Should(Succeed())
Expand Down Expand Up @@ -709,6 +749,10 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName2).
WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
Build()
infrastructureMachinePoolTemplate1 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName1).Build()
infrastructureMachinePoolTemplate2 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName2).
WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
Build()
infrastructureClusterTemplate1 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate1").
Build()
infrastructureClusterTemplate2 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate2").
Expand All @@ -734,11 +778,26 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
WithInfrastructureTemplate(infrastructureMachineTemplate2).
WithBootstrapTemplate(bootstrapTemplate).
Build()
machinePoolClass1 := builder.MachinePoolClass(workerClassName1).
WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
WithBootstrapTemplate(bootstrapTemplate).
WithLabels(map[string]string{"foo": "bar"}).
WithAnnotations(map[string]string{"foo": "bar"}).
Build()
machinePoolClass2 := builder.MachinePoolClass(workerClassName2).
WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
WithBootstrapTemplate(bootstrapTemplate).
Build()
machinePoolClass3 := builder.MachinePoolClass(workerClassName3).
WithInfrastructureTemplate(infrastructureMachinePoolTemplate2).
WithBootstrapTemplate(bootstrapTemplate).
Build()
clusterClass := builder.ClusterClass(ns.Name, clusterClassName1).
WithInfrastructureClusterTemplate(infrastructureClusterTemplate1).
WithControlPlaneTemplate(controlPlaneTemplate).
WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate1).
WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2).
WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2).
Build()

// This ClusterClass changes a number of things in a ClusterClass in a way that is compatible for a ClusterClass rebase operation.
Expand All @@ -750,6 +809,7 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
WithControlPlaneTemplate(controlPlaneTemplate).
WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate2).
WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2, *machineDeploymentClass3).
WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2, *machinePoolClass3).
Build()

// 3) Two Clusters including a Cluster Topology objects and the MachineDeploymentTopology objects used in the
Expand All @@ -762,13 +822,23 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
WithClass(workerClassName2).
WithReplicas(1).
Build()
machinePoolTopology1 := builder.MachinePoolTopology("mp1").
WithClass(workerClassName1).
WithReplicas(3).
Build()
machinePoolTopology2 := builder.MachinePoolTopology("mp2").
WithClass(workerClassName2).
WithReplicas(1).
Build()

cluster1 := builder.Cluster(ns.Name, clusterName1).
WithTopology(
builder.ClusterTopology().
WithClass(clusterClass.Name).
WithMachineDeployment(machineDeploymentTopology1).
WithMachineDeployment(machineDeploymentTopology2).
WithMachinePool(machinePoolTopology1).
WithMachinePool(machinePoolTopology2).
WithVersion("1.22.2").
WithControlPlaneReplicas(3).
Build()).
Expand All @@ -779,6 +849,7 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
builder.ClusterTopology().
WithClass(clusterClass.Name).
WithMachineDeployment(machineDeploymentTopology2).
WithMachinePool(machinePoolTopology2).
WithVersion("1.21.0").
WithControlPlaneReplicas(1).
Build()).
Expand All @@ -791,6 +862,8 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
infrastructureClusterTemplate2,
infrastructureMachineTemplate1,
infrastructureMachineTemplate2,
infrastructureMachinePoolTemplate1,
infrastructureMachinePoolTemplate2,
bootstrapTemplate,
controlPlaneTemplate,
clusterClass,
Expand Down Expand Up @@ -994,6 +1067,92 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {
return nil
}

// assertMachinePoolsReconcile checks if the MachinePools:
// 1) Are created in the correct number.
// 2) Have the correct labels (TopologyOwned, ClusterName, MachinePoolName).
// 3) Have the correct replicas and version.
// 4) Have the correct Kind/APIVersion and Labels/Annotations for BootstrapRef and InfrastructureRef templates.
func assertMachinePoolsReconcile(cluster *clusterv1.Cluster) error {
	// List all created machine pools to assert the expected numbers are created.
	machinePools := &expv1.MachinePoolList{}
	if err := env.List(ctx, machinePools, client.InNamespace(cluster.Namespace)); err != nil {
		return err
	}

	// clusterMPs will hold the MachinePools that have labels associating them with the cluster.
	clusterMPs := []expv1.MachinePool{}

	// Run through all machine pools and add only those with the TopologyOwnedLabel and the correct
	// ClusterNameLabel to the items for further testing.
	for i := range machinePools.Items {
		// If the machinePool doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel ignore.
		mp := machinePools.Items[i]
		if err := assertClusterTopologyOwnedLabel(&mp); err != nil {
			continue
		}
		if err := assertClusterNameLabel(&mp, cluster.Name); err != nil {
			continue
		}
		clusterMPs = append(clusterMPs, mp)
	}

	// If the total number of machine pools is not as expected return an error.
	if len(clusterMPs) != len(cluster.Spec.Topology.Workers.MachinePools) {
		return fmt.Errorf("number of MachinePools %v does not match number expected %v", len(clusterMPs), len(cluster.Spec.Topology.Workers.MachinePools))
	}
	for i := range clusterMPs {
		for _, topologyMP := range cluster.Spec.Topology.Workers.MachinePools {
			mp := clusterMPs[i]
			// Use the ClusterTopologyMachinePoolNameLabel to get the specific machinePool to compare to.
			if topologyMP.Name != mp.GetLabels()[clusterv1.ClusterTopologyMachinePoolNameLabel] {
				continue
			}

			// Check if the ClusterTopologyOwnedLabel and the ClusterNameLabel are set correctly.
			if err := assertClusterTopologyOwnedLabel(&mp); err != nil {
				return err
			}

			if err := assertClusterNameLabel(&mp, cluster.Name); err != nil {
				return err
			}

			// Check replicas and version for the MachinePool.
			// Guard against nil replica pointers so a misconfigured object fails the
			// assertion with an error instead of panicking the test helper.
			if mp.Spec.Replicas == nil || topologyMP.Replicas == nil {
				return fmt.Errorf("replicas must be set on both the MachinePool (%v) and the topology (%v)", mp.Spec.Replicas, topologyMP.Replicas)
			}
			if *mp.Spec.Replicas != *topologyMP.Replicas {
				// Dereference the pointers so the message shows the counts, not addresses.
				return fmt.Errorf("replicas %v does not match expected %v", *mp.Spec.Replicas, *topologyMP.Replicas)
			}
			if *mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
				return fmt.Errorf("version %v does not match expected %v", *mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version)
			}

			// Check if the InfrastructureReference exists.
			if err := referenceExistsWithCorrectKindAndAPIVersion(&mp.Spec.Template.Spec.InfrastructureRef,
				builder.TestInfrastructureMachinePoolKind,
				builder.InfrastructureGroupVersion); err != nil {
				return err
			}

			// Check if the InfrastructureReference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(mp.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected Kind and APIVersion.
			if err := referenceExistsWithCorrectKindAndAPIVersion(mp.Spec.Template.Spec.Bootstrap.ConfigRef,
				builder.TestBootstrapConfigKind,
				builder.BootstrapGroupVersion); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(*mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil {
				return err
			}
		}
	}
	return nil
}

// getAndAssertLabelsAndAnnotations pulls the template referenced in the ObjectReference from the API server, checks for:
// 1) The ClusterTopologyOwnedLabel.
// 2) The correct ClusterNameLabel.
Expand Down
30 changes: 18 additions & 12 deletions internal/controllers/topology/cluster/desired_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,18 +81,24 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
// - Building the TopologyReconciled condition.
// - Make upgrade decisions on the control plane.
// - Making upgrade decisions on machine pools.
if len(s.Current.MachinePools) > 0 {
client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
// Mark all the MachinePools that are currently upgrading.
mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
}
// Mark all the MachinePools that are currently upgrading.
mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, r.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
// if len(s.Current.MachinePools) > 0 {
// client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
// if err != nil {
// return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
// }
// // Mark all the MachinePools that are currently upgrading.
// mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)
// if err != nil {
// return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
// }
// s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
// }

// Compute the desired state of the ControlPlane object, eventually adding a reference to the
// InfrastructureMachineTemplate generated by the previous step.
Expand Down
Loading

0 comments on commit 60d9728

Please sign in to comment.