diff --git a/controllers/remote/cluster_cache_tracker_fake.go b/controllers/remote/cluster_cache_tracker_fake.go index fd1c08a5e0e6..ba1627592936 100644 --- a/controllers/remote/cluster_cache_tracker_fake.go +++ b/controllers/remote/cluster_cache_tracker_fake.go @@ -30,6 +30,7 @@ func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, scheme *runti client: cl, scheme: scheme, clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), + clusterLock: newKeyedMutex(), } testCacheTracker.clusterAccessors[objKey] = &clusterAccessor{ diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 2d0317a3c087..1f6ce841339e 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" @@ -46,20 +47,24 @@ import ( fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" ) var ( - clusterName1 = "cluster1" - clusterName2 = "cluster2" - clusterClassName1 = "class1" - clusterClassName2 = "class2" - infrastructureMachineTemplateName1 = "inframachinetemplate1" - infrastructureMachineTemplateName2 = "inframachinetemplate2" + clusterName1 = "cluster1" + clusterName2 = "cluster2" + clusterClassName1 = "class1" + clusterClassName2 = "class2" + infrastructureMachineTemplateName1 = "inframachinetemplate1" + infrastructureMachineTemplateName2 = "inframachinetemplate2" + infrastructureMachinePoolTemplateName1 = "inframachinepooltemplate1" + infrastructureMachinePoolTemplateName2 = "inframachinepooltemplate2" ) func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() g := NewWithT(t) timeout := 5 * time.Second @@ -96,6 +101,9 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. 
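A note on the feature-gate pattern added throughout these tests: `defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()` works because, in the component-base version used here, the helper enables the gate immediately and returns a restore function; the trailing `()` is what actually gets deferred. A minimal sketch of the expanded form (illustrative only, not part of the diff; the name `restore` is made up for clarity):

	// Enable the MachinePool feature gate for the duration of this test.
	restore := utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)
	// Restore the previous gate value when the test returns.
	defer restore()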
g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) @@ -105,6 +113,8 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) { func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() + g := NewWithT(t) timeout := 5 * time.Second @@ -144,6 +154,9 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) } @@ -153,6 +166,7 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) { func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() g := NewWithT(t) timeout := 300 * time.Second @@ -190,6 +204,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) return nil @@ -201,9 +218,10 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) clusterWithTopologyChange := actualCluster.DeepCopy() clusterWithTopologyChange.Spec.Topology.Workers.MachineDeployments[0].Replicas = &replicas + clusterWithTopologyChange.Spec.Topology.Workers.MachinePools[0].Replicas = &replicas g.Expect(patchHelper.Patch(ctx, clusterWithTopologyChange)).Should(Succeed()) - // Check to ensure all objects are correctly reconciled with the new MachineDeployment replica count in Topology. + // Check to ensure all objects are correctly reconciled with the new MachineDeployment and MachinePool replica count in Topology. g.Eventually(func(g Gomega) error { // Get the cluster object. updatedCluster := &clusterv1.Cluster{} @@ -214,6 +232,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { // Check to ensure the replica count has been successfully updated in the API server and cache. g.Expect(updatedCluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(&replicas)) + // Check to ensure the replica count has been successfully updated in the API server and cache. + g.Expect(updatedCluster.Spec.Topology.Workers.MachinePools[0].Replicas).To(Equal(&replicas)) + // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 
g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed()) @@ -226,6 +247,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(updatedCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) return nil @@ -234,6 +258,7 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() g := NewWithT(t) timeout := 5 * time.Second @@ -273,6 +298,9 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) } @@ -285,8 +313,10 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { patchHelper, err := patch.NewHelper(clusterClass, env.Client) g.Expect(err).ToNot(HaveOccurred()) - // Change the infrastructureMachineTemplateName for the first of our machineDeployment and update in the API. + // Change the infrastructureMachineTemplateName for the first of our MachineDeployments and update in the API. clusterClass.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name = infrastructureMachineTemplateName2 + // Change the infrastructureMachinePoolTemplateName for the first of our MachinePools and update in the API. + clusterClass.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name = infrastructureMachinePoolTemplateName2 g.Expect(patchHelper.Patch(ctx, clusterClass)).To(Succeed()) g.Eventually(func(g Gomega) error { @@ -295,6 +325,7 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { class := &clusterv1.ClusterClass{} g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, class)).To(Succeed()) g.Expect(class.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachineTemplateName2)) + g.Expect(class.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachinePoolTemplateName2)) // For each cluster check that the clusterClass changes have been correctly reconciled. for _, name := range []string{clusterName1, clusterName2} { @@ -316,6 +347,9 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 
g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + // Check if the Cluster has the relevant TopologyReconciledCondition. g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) } @@ -325,6 +359,7 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() g := NewWithT(t) timeout := 30 * time.Second @@ -362,6 +397,10 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + return nil }, timeout).Should(Succeed()) @@ -392,12 +431,17 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) + + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) + return nil }, timeout).Should(Succeed()) } func TestClusterReconciler_reconcileDelete(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() catalog := runtimecatalog.New() _ = runtimehooksv1.AddToCatalog(catalog) @@ -551,6 +595,7 @@ func TestClusterReconciler_reconcileDelete(t *testing.T) { // In this case deletion of the ClusterClass should be blocked by the webhook. func TestClusterReconciler_deleteClusterClass(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() g := NewWithT(t) timeout := 5 * time.Second @@ -588,6 +633,9 @@ func TestClusterReconciler_deleteClusterClass(t *testing.T) { // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) + + // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. + g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) } return nil }, timeout).Should(Succeed()) @@ -709,6 +757,10 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName2). WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}). 
Build() + infrastructureMachinePoolTemplate1 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName1).Build() + infrastructureMachinePoolTemplate2 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName2). + WithSpecFields(map[string]interface{}{"spec.template.fakeSetting": true}). + Build() infrastructureClusterTemplate1 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate1"). Build() infrastructureClusterTemplate2 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate2"). @@ -719,47 +771,71 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) Build() bootstrapTemplate := builder.TestBootstrapTemplate(ns.Name, "bootstraptemplate").Build() - // 2) ClusterClass definitions including definitions of MachineDeploymentClasses used inside the ClusterClass. - machineDeploymentClass1 := builder.MachineDeploymentClass(workerClassName1). + // 2) ClusterClass definitions including definitions of MachineDeploymentClasses and MachinePoolClasses used inside the ClusterClass. + machineDeploymentClass1 := builder.MachineDeploymentClass(workerClassName1 + "-md"). WithInfrastructureTemplate(infrastructureMachineTemplate1). WithBootstrapTemplate(bootstrapTemplate). WithLabels(map[string]string{"foo": "bar"}). WithAnnotations(map[string]string{"foo": "bar"}). Build() - machineDeploymentClass2 := builder.MachineDeploymentClass(workerClassName2). + machineDeploymentClass2 := builder.MachineDeploymentClass(workerClassName2 + "-md"). WithInfrastructureTemplate(infrastructureMachineTemplate1). WithBootstrapTemplate(bootstrapTemplate). Build() - machineDeploymentClass3 := builder.MachineDeploymentClass(workerClassName3). + machineDeploymentClass3 := builder.MachineDeploymentClass(workerClassName3 + "-md"). WithInfrastructureTemplate(infrastructureMachineTemplate2). WithBootstrapTemplate(bootstrapTemplate). Build() + machinePoolClass1 := builder.MachinePoolClass(workerClassName1 + "-mp"). + WithInfrastructureTemplate(infrastructureMachinePoolTemplate1). + WithBootstrapTemplate(bootstrapTemplate). + WithLabels(map[string]string{"foo": "bar"}). + WithAnnotations(map[string]string{"foo": "bar"}). + Build() + machinePoolClass2 := builder.MachinePoolClass(workerClassName2 + "-mp"). + WithInfrastructureTemplate(infrastructureMachinePoolTemplate1). + WithBootstrapTemplate(bootstrapTemplate). + Build() + machinePoolClass3 := builder.MachinePoolClass(workerClassName3 + "-mp"). + WithInfrastructureTemplate(infrastructureMachinePoolTemplate2). + WithBootstrapTemplate(bootstrapTemplate). + Build() clusterClass := builder.ClusterClass(ns.Name, clusterClassName1). WithInfrastructureClusterTemplate(infrastructureClusterTemplate1). WithControlPlaneTemplate(controlPlaneTemplate). WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate1). WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2). + WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2). Build() // This ClusterClass changes a number of things in a ClusterClass in a way that is compatible for a ClusterClass rebase operation. // 1) It changes the controlPlaneMachineInfrastructureTemplate to a new template. - // 2) It adds a new machineDeploymentClass + // 2) It adds a new machineDeploymentClass and machinePoolClass // 3) It changes the infrastructureClusterTemplate. clusterClassForRebase := builder.ClusterClass(ns.Name, clusterClassName2). 
WithInfrastructureClusterTemplate(infrastructureClusterTemplate2). WithControlPlaneTemplate(controlPlaneTemplate). WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate2). WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2, *machineDeploymentClass3). + WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2, *machinePoolClass3). Build() - // 3) Two Clusters including a Cluster Topology objects and the MachineDeploymentTopology objects used in the - // Cluster Topology. The second cluster differs from the first both in version and in its MachineDeployment definition. + // 3) Two Clusters including a Cluster Topology objects and the MachineDeploymentTopology and MachinePoolTopology objects used in the + // Cluster Topology. The second cluster differs from the first both in version and in its MachineDeployment and MachinePool definitions. machineDeploymentTopology1 := builder.MachineDeploymentTopology("mdm1"). - WithClass(workerClassName1). + WithClass(workerClassName1 + "-md"). WithReplicas(3). Build() machineDeploymentTopology2 := builder.MachineDeploymentTopology("mdm2"). - WithClass(workerClassName2). + WithClass(workerClassName2 + "-md"). + WithReplicas(1). + Build() + machinePoolTopology1 := builder.MachinePoolTopology("mp1"). + WithClass(workerClassName1 + "-mp"). + WithReplicas(3). + Build() + machinePoolTopology2 := builder.MachinePoolTopology("mp2"). + WithClass(workerClassName2 + "-mp"). WithReplicas(1). Build() @@ -769,6 +845,8 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) WithClass(clusterClass.Name). WithMachineDeployment(machineDeploymentTopology1). WithMachineDeployment(machineDeploymentTopology2). + WithMachinePool(machinePoolTopology1). + WithMachinePool(machinePoolTopology2). WithVersion("1.22.2"). WithControlPlaneReplicas(3). Build()). @@ -779,11 +857,19 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) builder.ClusterTopology(). WithClass(clusterClass.Name). WithMachineDeployment(machineDeploymentTopology2). + WithMachinePool(machinePoolTopology2). WithVersion("1.21.0"). WithControlPlaneReplicas(1). Build()). Build() + // Setup kubeconfig secrets for the clusters, so the ClusterCacheTracker works. + cluster1Secret := kubeconfig.GenerateSecret(cluster1, kubeconfig.FromEnvTestConfig(env.Config, cluster1)) + cluster2Secret := kubeconfig.GenerateSecret(cluster2, kubeconfig.FromEnvTestConfig(env.Config, cluster2)) + // Unset the ownerrefs otherwise they are invalid because they contain an empty uid. + cluster1Secret.ObjectMeta.OwnerReferences = nil + cluster2Secret.ObjectMeta.OwnerReferences = nil + // Create a set of setupTestEnvForIntegrationTests from the objects above to add to the API server when the test environment starts. // The objects are created for every test, though some e.g. infrastructureMachineTemplate2 may not be used in every test. initObjs := []client.Object{ @@ -791,12 +877,16 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) infrastructureClusterTemplate2, infrastructureMachineTemplate1, infrastructureMachineTemplate2, + infrastructureMachinePoolTemplate1, + infrastructureMachinePoolTemplate2, bootstrapTemplate, controlPlaneTemplate, clusterClass, clusterClassForRebase, cluster1, cluster2, + cluster1Secret, + cluster2Secret, } cleanup := func() error { // Delete Objects in reverse, because we cannot delete a ClusterCLass if it is still used by a Cluster. 
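The cleanup body is elided by the hunk above; a sketch of the reverse-order deletion its comment describes, assuming the test environment's CleanupAndWait helper available in this suite (illustrative only, not part of the diff):

	// Delete in reverse creation order so Clusters go before the ClusterClass they use.
	for i := len(initObjs) - 1; i >= 0; i-- {
		if err := env.CleanupAndWait(ctx, initObjs[i]); err != nil {
			return err
		}
	}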
@@ -994,6 +1084,92 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error { return nil } +// assertMachinePoolsReconcile checks if the MachinePools: +// 1) Are created in the correct number. +// 2) Have the correct labels (TopologyOwned, ClusterName, MachinePoolName). +// 3) Have the correct replicas and version. +// 4) Have the correct Kind/APIVersion and Labels/Annotations for BootstrapRef and InfrastructureRef templates. +func assertMachinePoolsReconcile(cluster *clusterv1.Cluster) error { + // List all created MachinePools to assert that the expected number have been created. + machinePools := &expv1.MachinePoolList{} + if err := env.List(ctx, machinePools, client.InNamespace(cluster.Namespace)); err != nil { + return err + } + + // clusterMPs will hold the MachinePools that have labels associating them with the cluster. + clusterMPs := []expv1.MachinePool{} + + // Run through all MachinePools and add only those with the ClusterTopologyOwnedLabel and the correct + // ClusterNameLabel to the items for further testing. + for _, m := range machinePools.Items { + // If the MachinePool doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel, ignore it. + mp := m + if err := assertClusterTopologyOwnedLabel(&mp); err != nil { + continue + } + if err := assertClusterNameLabel(&mp, cluster.Name); err != nil { + continue + } + clusterMPs = append(clusterMPs, mp) + } + + // If the total number of MachinePools is not as expected, return an error. + if len(clusterMPs) != len(cluster.Spec.Topology.Workers.MachinePools) { + return fmt.Errorf("number of MachinePools %v does not match number expected %v", len(clusterMPs), len(cluster.Spec.Topology.Workers.MachinePools)) + } + for _, m := range clusterMPs { + for _, topologyMP := range cluster.Spec.Topology.Workers.MachinePools { + mp := m + // Use the ClusterTopologyMachinePoolNameLabel to get the specific MachinePool to compare to. + if topologyMP.Name != mp.GetLabels()[clusterv1.ClusterTopologyMachinePoolNameLabel] { + continue + } + + // Check if the ClusterTopologyOwnedLabel and the ClusterNameLabel are set correctly. + if err := assertClusterTopologyOwnedLabel(&mp); err != nil { + return err + } + + if err := assertClusterNameLabel(&mp, cluster.Name); err != nil { + return err + } + + // Check replicas and version for the MachinePool. + if *mp.Spec.Replicas != *topologyMP.Replicas { + return fmt.Errorf("replicas %v does not match expected %v", *mp.Spec.Replicas, *topologyMP.Replicas) + } + if *mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { + return fmt.Errorf("version %v does not match expected %v", *mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) + } + + // Check if the InfrastructureReference exists. + if err := referenceExistsWithCorrectKindAndAPIVersion(&mp.Spec.Template.Spec.InfrastructureRef, + builder.TestInfrastructureMachinePoolKind, + builder.InfrastructureGroupVersion); err != nil { + return err + } + + // Check if the InfrastructureReference has the expected labels and annotations. + if _, err := getAndAssertLabelsAndAnnotations(mp.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil { + return err + } + + // Check if the Bootstrap reference has the expected Kind and APIVersion. + if err := referenceExistsWithCorrectKindAndAPIVersion(mp.Spec.Template.Spec.Bootstrap.ConfigRef, + builder.TestBootstrapConfigKind, + builder.BootstrapGroupVersion); err != nil { + return err + } + + // Check if the Bootstrap reference has the expected labels and annotations. 
+ if _, err := getAndAssertLabelsAndAnnotations(*mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil { + return err + } + } + } + return nil +} + // getAndAssertLabelsAndAnnotations pulls the template referenced in the ObjectReference from the API server, checks for: // 1) The ClusterTopologyOwnedLabel. // 2) The correct ClusterNameLabel. @@ -1083,6 +1259,11 @@ func TestReconciler_DefaultCluster(t *testing.T) { mdTopologyBase := builder.MachineDeploymentTopology("md1"). WithClass("worker1"). WithReplicas(3) + mpClass1 := builder.MachinePoolClass("worker1"). + Build() + mpTopologyBase := builder.MachinePoolTopology("mp1"). + WithClass("worker1"). + WithReplicas(3) clusterBuilder := builder.Cluster(metav1.NamespaceDefault, clusterName1). WithTopology(topologyBase.DeepCopy().Build()) @@ -1151,6 +1332,7 @@ func TestReconciler_DefaultCluster(t *testing.T) { name: "Default nested values of Cluster variables with values from ClusterClass", clusterClass: classBuilder.DeepCopy(). WithWorkerMachineDeploymentClasses(*mdClass1). + WithWorkerMachinePoolClasses(*mpClass1). WithStatusVariables([]clusterv1.ClusterClassStatusVariable{ { Name: "location", @@ -1201,6 +1383,11 @@ func TestReconciler_DefaultCluster(t *testing.T) { Name: "httpProxy", Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, }).Build()). + WithMachinePool(mpTopologyBase.DeepCopy(). + WithVariables(clusterv1.ClusterVariable{ + Name: "httpProxy", + Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, + }).Build()). Build()). Build(), wantCluster: clusterBuilder.DeepCopy().WithTopology( @@ -1218,6 +1405,16 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }). Build()). + WithMachinePool( + mpTopologyBase.DeepCopy().WithVariables( + clusterv1.ClusterVariable{ + Name: "httpProxy", + Value: apiextensionsv1.JSON{ + // url has been added by defaulting. + Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`), + }, + }). + Build()). Build()). Build(), }, @@ -1245,6 +1442,9 @@ func TestReconciler_ValidateCluster(t *testing.T) { mdTopologyBase := builder.MachineDeploymentTopology("md1"). WithClass("worker1"). WithReplicas(3) + mpTopologyBase := builder.MachinePoolTopology("mp1"). + WithClass("worker1"). + WithReplicas(3) classBuilder := builder.ClusterClass(metav1.NamespaceDefault, clusterClassName1) topologyBase := builder.ClusterTopology(). WithClass(clusterClassName1). @@ -1305,7 +1505,8 @@ func TestReconciler_ValidateCluster(t *testing.T) { WithClass(clusterClassName1). WithVersion("1.22.2"). WithControlPlaneReplicas(3). - WithMachineDeployment(mdTopologyBase.Build()).Build(), + WithMachineDeployment(mdTopologyBase.Build()). + WithMachinePool(mpTopologyBase.Build()).Build(), ). 
Build(), wantValidationErr: true, diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index d2b21aa3af55..8775ff6467cb 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/test/builder" @@ -38,6 +39,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(expv1.AddToScheme(scheme)).To(Succeed()) deletionTime := metav1.Unix(0, 0) tests := []struct { @@ -240,6 +242,50 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason, wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are upgrading", }, + { + name: "should set the condition to false if new version is not picked up because at least one of the machine pools is upgrading", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.21.2"). + WithReplicas(3). + Build(), + }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(1), + ReadyReplicas: int32(1), + AvailableReplicas: int32(1), + UnavailableReplicas: int32(0), + }). + Build(), + }, + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = true + ut.MachinePools.MarkUpgrading("mp0-abc123") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason, + wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. MachinePool(s) mp0-abc123 are upgrading", + }, { name: "should set the condition to false if control plane picked the new version but machine deployments did not because control plane is upgrading", reconcileErr: nil, cluster: &clusterv1.Cluster{}, s: &scope.Scope{ @@ -286,6 +332,51 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason, wantConditionMessage: "MachineDeployment(s) md0-abc123 rollout and upgrade to version v1.22.0 on hold. 
Control plane is upgrading to version v1.22.0", }, + { + name: "should set the condition to false if control plane picked the new version but machine pools did not because control plane is upgrading", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.22.0"). + WithReplicas(3). + Build(), + }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). + Build(), + }, + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.ControlPlane.IsUpgrading = true + ut.MachinePools.MarkPendingUpgrade("mp0-abc123") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason, + wantConditionMessage: "MachinePool(s) mp0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is upgrading to version v1.22.0", + }, { name: "should set the condition to false if control plane picked the new version but machine deployments did not because control plane is scaling", reconcileErr: nil, @@ -332,6 +423,51 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason, wantConditionMessage: "MachineDeployment(s) md0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is reconciling desired replicas", }, + { + name: "should set the condition to false if control plane picked the new version but machine pools did not because control plane is scaling", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.22.0"). + WithReplicas(3). + Build(), + }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). + Build(), + }, + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.ControlPlane.IsScaling = true + ut.MachinePools.MarkPendingUpgrade("mp0-abc123") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason, + wantConditionMessage: "MachinePool(s) mp0-abc123 rollout and upgrade to version v1.22.0 on hold. 
Control plane is reconciling desired replicas", + }, { name: "should set the condition to false if control plane picked the new version but there are machine deployments pending create because control plane is scaling", reconcileErr: nil, @@ -365,7 +501,39 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionMessage: "MachineDeployment(s) for Topologies md0 creation on hold. Control plane is reconciling desired replicas", }, { - name: "should set the condition to true if control plane picked the new version and is upgrading but there are no machine deployments", + name: "should set the condition to false if control plane picked the new version but there are machine pools pending create because control plane is scaling", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.22.0"). + WithReplicas(3). + Build(), + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.ControlPlane.IsScaling = true + ut.MachinePools.MarkPendingCreate("mp0") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledMachinePoolsCreatePendingReason, + wantConditionMessage: "MachinePool(s) for Topologies mp0 creation on hold. Control plane is reconciling desired replicas", + }, + { + name: "should set the condition to true if control plane picked the new version and is upgrading but there are no machine deployments or machine pools", reconcileErr: nil, cluster: &clusterv1.Cluster{}, s: &scope.Scope{ @@ -394,7 +562,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionStatus: corev1.ConditionTrue, }, { - name: "should set the condition to true if control plane picked the new version and is scaling but there are no machine deployments", + name: "should set the condition to true if control plane picked the new version and is scaling but there are no machine deployments or machine pools", reconcileErr: nil, cluster: &clusterv1.Cluster{}, s: &scope.Scope{ @@ -503,6 +671,75 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason, wantConditionMessage: "MachineDeployment(s) md1-abc123 rollout and upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are upgrading", }, + { + name: "should set the condition to false if some machine pools have not picked the new version because other machine pools are upgrading", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.22.0"). + WithReplicas(3). + Build(), + }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithVersion("v1.22.0"). 
+ WithStatus(expv1.MachinePoolStatus{ + // mp is not ready because we don't have 2 updated, ready and available replicas. + Replicas: int32(2), + ReadyReplicas: int32(1), + AvailableReplicas: int32(1), + UnavailableReplicas: int32(0), + }). + Build(), + }, + "mp1": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp1-abc123"). + WithReplicas(2). + WithVersion("v1.21.2"). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). + Build(), + }, + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkUpgrading("mp0-abc123") + ut.MachinePools.MarkPendingUpgrade("mp1-abc123") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + machines: []*clusterv1.Machine{ + builder.Machine("ns1", "mp0-machine0"). + WithLabels(map[string]string{clusterv1.ClusterTopologyMachinePoolNameLabel: "mp0"}). + WithVersion("v1.21.2"). // Machine's version does not match MachinePool's version + Build(), + builder.Machine("ns1", "mp1-machine0"). + WithLabels(map[string]string{clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1"}). + WithVersion("v1.21.2"). + Build(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason, + wantConditionMessage: "MachinePool(s) mp1-abc123 rollout and upgrade to version v1.22.0 on hold. MachinePool(s) mp0-abc123 are upgrading", + }, { name: "should set the condition to false if some machine deployments have not picked the new version because their upgrade has been deferred", reconcileErr: nil, @@ -563,7 +800,64 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionMessage: "MachineDeployment(s) md1-abc123 rollout and upgrade to version v1.22.0 deferred.", }, { - name: "should set the condition to true if there are no reconcile errors and control plane and all machine deployments picked up the new version", + name: "should set the condition to false if some machine pools have not picked the new version because their upgrade has been deferred", + reconcileErr: nil, + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: "v1.22.0", + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{}, + ControlPlane: &scope.ControlPlaneState{ + Object: builder.ControlPlane("ns1", "controlplane1"). + WithVersion("v1.22.0"). + WithReplicas(3). + Build(), + }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithVersion("v1.22.0"). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). + Build(), + }, + "mp1": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp1-abc123"). + WithReplicas(2). + WithVersion("v1.21.2"). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). 
+ Build(), + }, + }, + }, + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkDeferredUpgrade("mp1-abc123") + return ut + }(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradeDeferredReason, + wantConditionMessage: "MachinePool(s) mp1-abc123 rollout and upgrade to version v1.22.0 deferred.", + }, + { + name: "should set the condition to true if there are no reconcile errors and control plane and all machine deployments and machine pools picked up the new version", reconcileErr: nil, cluster: &clusterv1.Cluster{}, s: &scope.Scope{ @@ -608,6 +902,32 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { Build(), }, }, + MachinePools: scope.MachinePoolsStateMap{ + "mp0": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp0-abc123"). + WithReplicas(2). + WithVersion("v1.22.0"). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(1), + ReadyReplicas: int32(1), + AvailableReplicas: int32(1), + UnavailableReplicas: int32(0), + }). + Build(), + }, + "mp1": &scope.MachinePoolState{ + Object: builder.MachinePool("ns1", "mp1-abc123"). + WithReplicas(2). + WithVersion("v1.22.0"). + WithStatus(expv1.MachinePoolStatus{ + Replicas: int32(2), + ReadyReplicas: int32(2), + AvailableReplicas: int32(2), + UnavailableReplicas: int32(0), + }). + Build(), + }, + }, }, UpgradeTracker: func() *scope.UpgradeTracker { ut := scope.NewUpgradeTracker() @@ -637,8 +957,11 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { objs := []client.Object{} if tt.s != nil && tt.s.Current != nil { - for _, mds := range tt.s.Current.MachineDeployments { - objs = append(objs, mds.Object) + for _, md := range tt.s.Current.MachineDeployments { + objs = append(objs, md.Object) + } + for _, mp := range tt.s.Current.MachinePools { + objs = append(objs, mp.Object) } } for _, m := range tt.machines { @@ -662,7 +985,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { } } -func TestComputeMachineDeploymentNameList(t *testing.T) { +func TestComputeNameList(t *testing.T) { tests := []struct { name string mdList []string diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index 52cbcf05e079..d9171c490d10 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -40,9 +41,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" @@ -722,6 +725,44 @@ func 
TestReconcile_callAfterClusterUpgrade(t *testing.T) { wantHookToBeCalled: false, wantError: false, }, + { + name: "hook should not be called if the control plane is stable at desired version but MPs are upgrading - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneObj, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkUpgrading("mp1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, { name: "hook should not be called if the control plane is stable at desired version but MDs are pending create - hook is marked", s: &scope.Scope{ @@ -759,6 +800,43 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { wantHookToBeCalled: false, wantError: false, }, + { + name: "hook should not be called if the control plane is stable at desired version but MPs are pending create - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneObj, + }}, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkPendingCreate("mp-topology-1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, { name: "hook should not be called if the control plane is stable at desired version but MDs are pending upgrade - hook is marked", s: &scope.Scope{ @@ -796,6 +874,43 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { wantHookToBeCalled: false, wantError: false, }, + { + name: "hook should not be called if the control plane is stable at desired version but MPs are pending upgrade - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneObj, + }}, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + 
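// Annotation on the tracker calls below (a reading grounded in how this file uses them, not new API):
// IsPendingUpgrade = false records that the control plane has already picked up the desired version,
// while MarkUpgrading records an in-flight MachinePool rollout; that pending MachinePool work is what
// keeps the AfterClusterUpgrade hook from being called even though the control plane itself is stable.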
ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkPendingUpgrade("mp1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, { name: "hook should not be called if the control plane is stable at desired version but MDs upgrade is deferred - hook is marked", s: &scope.Scope{ @@ -835,7 +950,45 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { wantError: false, }, { - name: "hook should be called if the control plane and MDs are stable at the topology version - success response should unmark the hook", + name: "hook should not be called if the control plane is stable at desired version but MPs upgrade is deferred - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneObj, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.IsPendingUpgrade = false + ut.MachinePools.MarkDeferredUpgrade("mp1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - success response should unmark the hook", s: &scope.Scope{ Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ @@ -872,7 +1025,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { wantError: false, }, { - name: "hook should be called if the control plane and MDs are stable at the topology version - failure response should leave the hook marked", + name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - failure response should leave the hook marked", s: &scope.Scope{ Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ @@ -2042,6 +2195,340 @@ func TestReconcileMachineDeployments(t *testing.T) { } } +func TestReconcileMachinePools(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() + + g := NewWithT(t) + + infrastructureMachinePool1 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-1").Build() + bootstrapConfig1 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-1").Build() + mp1 := newFakeMachinePoolTopologyState("mp-1", infrastructureMachinePool1, bootstrapConfig1) + + upgradeTrackerWithmp1PendingCreate := scope.NewUpgradeTracker() + upgradeTrackerWithmp1PendingCreate.MachinePools.MarkPendingCreate("mp-1-topology") + + infrastructureMachinePool2 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-2").Build() + bootstrapConfig2 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-2").Build() + mp2 := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2, bootstrapConfig2) + infrastructureMachinePool2WithChanges := infrastructureMachinePool2.DeepCopy() + 
g.Expect(unstructured.SetNestedField(infrastructureMachinePool2WithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + mp2WithChangedInfrastructureMachinePool := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2WithChanges, bootstrapConfig2) + upgradeTrackerWithmp2PendingUpgrade := scope.NewUpgradeTracker() + upgradeTrackerWithmp2PendingUpgrade.MachinePools.MarkPendingUpgrade("mp-2") + + infrastructureMachinePool3 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-3").Build() + bootstrapConfig3 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-3").Build() + mp3 := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3) + bootstrapConfig3WithChanges := bootstrapConfig3.DeepCopy() + g.Expect(unstructured.SetNestedField(bootstrapConfig3WithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + mp3WithChangedbootstrapConfig := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChanges) + bootstrapConfig3WithChangeKind := bootstrapConfig3.DeepCopy() + bootstrapConfig3WithChangeKind.SetKind("AnotherGenericbootstrapConfig") + mp3WithChangedbootstrapConfigChangedKind := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChangeKind) + + infrastructureMachinePool4 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-4").Build() + bootstrapConfig4 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-4").Build() + mp4 := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4, bootstrapConfig4) + infrastructureMachinePool4WithChanges := infrastructureMachinePool4.DeepCopy() + g.Expect(unstructured.SetNestedField(infrastructureMachinePool4WithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + bootstrapConfig4WithChanges := bootstrapConfig4.DeepCopy() + g.Expect(unstructured.SetNestedField(bootstrapConfig4WithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + mp4WithChangedObjects := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4WithChanges, bootstrapConfig4WithChanges) + + infrastructureMachinePool5 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-5").Build() + bootstrapConfig5 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-5").Build() + mp5 := newFakeMachinePoolTopologyState("mp-5", infrastructureMachinePool5, bootstrapConfig5) + infrastructureMachinePool5WithChangedKind := infrastructureMachinePool5.DeepCopy() + infrastructureMachinePool5WithChangedKind.SetKind("ChangedKind") + mp5WithChangedinfrastructureMachinePoolKind := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool5WithChangedKind, bootstrapConfig5) + + infrastructureMachinePool6 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-6").Build() + bootstrapConfig6 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-6").Build() + mp6 := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6) + bootstrapConfig6WithChangedNamespace := bootstrapConfig6.DeepCopy() + bootstrapConfig6WithChangedNamespace.SetNamespace("ChangedNamespace") + mp6WithChangedbootstrapConfigNamespace := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6WithChangedNamespace) + + infrastructureMachinePool7 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, 
"infrastructure-machinepool-7").Build() + bootstrapConfig7 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-7").Build() + mp7 := newFakeMachinePoolTopologyState("mp-7", infrastructureMachinePool7, bootstrapConfig7) + + infrastructureMachinePool8Create := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-create").Build() + bootstrapConfig8Create := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-create").Build() + mp8Create := newFakeMachinePoolTopologyState("mp-8-create", infrastructureMachinePool8Create, bootstrapConfig8Create) + infrastructureMachinePool8Delete := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-delete").Build() + bootstrapConfig8Delete := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-delete").Build() + mp8Delete := newFakeMachinePoolTopologyState("mp-8-delete", infrastructureMachinePool8Delete, bootstrapConfig8Delete) + infrastructureMachinePool8Update := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-update").Build() + bootstrapConfig8Update := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-update").Build() + mp8Update := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8Update, bootstrapConfig8Update) + infrastructureMachinePool8UpdateWithChanges := infrastructureMachinePool8Update.DeepCopy() + g.Expect(unstructured.SetNestedField(infrastructureMachinePool8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + bootstrapConfig8UpdateWithChanges := bootstrapConfig8Update.DeepCopy() + g.Expect(unstructured.SetNestedField(bootstrapConfig8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed()) + mp8UpdateWithChangedObjects := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8UpdateWithChanges, bootstrapConfig8UpdateWithChanges) + + infrastructureMachinePool9m := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-9m").Build() + bootstrapConfig9m := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-9m").Build() + mp9 := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m) + mp9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} + mp9WithInstanceSpecificTemplateMetadata := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m) + mp9WithInstanceSpecificTemplateMetadata.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"} + + tests := []struct { + name string + current []*scope.MachinePoolState + currentOnlyAPIServer []*scope.MachinePoolState + desired []*scope.MachinePoolState + upgradeTracker *scope.UpgradeTracker + want []*scope.MachinePoolState + wantInfrastructureMachinePoolObjectUpdate map[string]bool + wantBootstrapObjectUpdate map[string]bool + wantErr bool + }{ + { + name: "Should create desired MachinePool if the current does not exists yet", + current: nil, + desired: []*scope.MachinePoolState{mp1}, + want: []*scope.MachinePoolState{mp1}, + wantErr: false, + }, + { + name: "Should skip creating desired MachinePool if it already exists in the apiserver (even if it is not in current state)", + current: nil, + currentOnlyAPIServer: []*scope.MachinePoolState{mp1}, + desired: []*scope.MachinePoolState{mp1}, + want: []*scope.MachinePoolState{mp1}, + wantErr: false, + }, 
+ { + name: "Should not create desired MachinePool if the current does not exist yet and it is marked as pending create", + current: nil, + upgradeTracker: upgradeTrackerWithmp1PendingCreate, + desired: []*scope.MachinePoolState{mp1}, + want: nil, + wantErr: false, + }, + { + name: "No-op if current MachinePool is equal to desired", + current: []*scope.MachinePoolState{mp1}, + desired: []*scope.MachinePoolState{mp1}, + want: []*scope.MachinePoolState{mp1}, + wantErr: false, + }, + { + name: "Should update InfrastructureMachinePool", + current: []*scope.MachinePoolState{mp2}, + desired: []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool}, + want: []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool}, + wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": true}, + wantErr: false, + }, + { + name: "Should not update InfrastructureMachinePool if MachinePool is pending upgrade", + current: []*scope.MachinePoolState{mp2}, + desired: []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool}, + upgradeTracker: upgradeTrackerWithmp2PendingUpgrade, + want: []*scope.MachinePoolState{mp2}, + wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": false}, + wantErr: false, + }, + { + name: "Should update BootstrapConfig", + current: []*scope.MachinePoolState{mp3}, + desired: []*scope.MachinePoolState{mp3WithChangedbootstrapConfig}, + want: []*scope.MachinePoolState{mp3WithChangedbootstrapConfig}, + wantBootstrapObjectUpdate: map[string]bool{"mp-3": true}, + wantErr: false, + }, + { + name: "Should fail to update MachinePool because of changed BootstrapConfig kind", + current: []*scope.MachinePoolState{mp3}, + desired: []*scope.MachinePoolState{mp3WithChangedbootstrapConfigChangedKind}, + wantErr: true, + }, + { + name: "Should update InfrastructureMachinePool and BootstrapConfig", + current: []*scope.MachinePoolState{mp4}, + desired: []*scope.MachinePoolState{mp4WithChangedObjects}, + want: []*scope.MachinePoolState{mp4WithChangedObjects}, + wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-4": true}, + wantBootstrapObjectUpdate: map[string]bool{"mp-4": true}, + wantErr: false, + }, + { + name: "Should fail to update MachinePool because of changed InfrastructureMachinePool kind", + current: []*scope.MachinePoolState{mp5}, + desired: []*scope.MachinePoolState{mp5WithChangedinfrastructureMachinePoolKind}, + wantErr: true, + }, + { + name: "Should fail to update MachinePool because of changed bootstrapConfig namespace", + current: []*scope.MachinePoolState{mp6}, + desired: []*scope.MachinePoolState{mp6WithChangedbootstrapConfigNamespace}, + wantErr: true, + }, + { + name: "Should delete MachinePool", + current: []*scope.MachinePoolState{mp7}, + desired: []*scope.MachinePoolState{}, + want: []*scope.MachinePoolState{}, + wantErr: false, + }, + { + name: "Should create, update and delete MachinePools", + current: []*scope.MachinePoolState{mp8Update, mp8Delete}, + desired: []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects}, + want: []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects}, + wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-8-update": true}, + wantBootstrapObjectUpdate: map[string]bool{"mp-8-update": true}, + wantErr: false, + }, + { + name: "Enforce template metadata", + current: []*scope.MachinePoolState{mp9WithInstanceSpecificTemplateMetadata}, + desired: []*scope.MachinePoolState{mp9}, + want: []*scope.MachinePoolState{mp9}, + wantErr: false, + }, + } + for _, tt := range tests { +
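// Note: each case below runs against its own namespace (created in the test body), so objects and server-side apply field ownership from one case cannot leak into the next. +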
t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + // Create namespace and modify input to have the correct namespace set + namespace, err := env.CreateNamespace(ctx, "reconcile-machine-pools") + g.Expect(err).ToNot(HaveOccurred()) + for i, s := range tt.current { + tt.current[i] = prepareMachinePoolState(s, namespace.GetName()) + } + for i, s := range tt.desired { + tt.desired[i] = prepareMachinePoolState(s, namespace.GetName()) + } + for i, s := range tt.want { + tt.want[i] = prepareMachinePoolState(s, namespace.GetName()) + } + + for _, s := range tt.current { + g.Expect(env.PatchAndWait(ctx, s.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + g.Expect(env.PatchAndWait(ctx, s.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + g.Expect(env.PatchAndWait(ctx, s.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + } + + currentMachinePoolStates := toMachinePoolTopologyStateMap(tt.current) + s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build()) + s.Current.MachinePools = currentMachinePoolStates + + // currentOnlyAPIServer MachinePools exist only in the API server but are not part of s.Current. + // This simulates that getCurrentMachinePoolState in current_state.go read a stale MachinePool list. + for _, s := range tt.currentOnlyAPIServer { + mpState := prepareMachinePoolState(s, namespace.GetName()) + + g.Expect(env.PatchAndWait(ctx, mpState.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + g.Expect(env.PatchAndWait(ctx, mpState.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + g.Expect(env.PatchAndWait(ctx, mpState.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed()) + } + + s.Desired = &scope.ClusterState{MachinePools: toMachinePoolTopologyStateMap(tt.desired)} + + if tt.upgradeTracker != nil { + s.UpgradeTracker = tt.upgradeTracker + } + + r := Reconciler{ + Client: env.GetClient(), + APIReader: env.GetAPIReader(), + patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()), + recorder: env.GetEventRecorderFor("test"), + } + err = r.reconcileMachinePools(ctx, s) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + var gotMachinePoolList expv1.MachinePoolList + g.Expect(env.GetAPIReader().List(ctx, &gotMachinePoolList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed()) + g.Expect(gotMachinePoolList.Items).To(HaveLen(len(tt.want))) + + if tt.want == nil { + // No MachinePools should exist.
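+ // (The HaveLen check above already covers this; BeEmpty just makes the failure output explicit.)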
+ g.Expect(gotMachinePoolList.Items).To(BeEmpty()) + } + + for _, wantMachinePoolState := range tt.want { + for _, gotMachinePool := range gotMachinePoolList.Items { + if wantMachinePoolState.Object.Name != gotMachinePool.Name { + continue + } + currentMachinePoolTopologyName := wantMachinePoolState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] + currentMachinePoolState := currentMachinePoolStates[currentMachinePoolTopologyName] + + // Copy over the names of the newly created InfrastructureRef and Bootstrap.ConfigRef because they get generated names + wantMachinePoolState.Object.Spec.Template.Spec.InfrastructureRef.Name = gotMachinePool.Spec.Template.Spec.InfrastructureRef.Name + if gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef != nil { + wantMachinePoolState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name + } + + // Compare MachinePool. + // Note: We're intentionally only comparing Spec as otherwise we would have to account for + // empty vs. filled out TypeMeta. + g.Expect(gotMachinePool.Spec).To(BeComparableTo(wantMachinePoolState.Object.Spec)) + + // Compare BootstrapObject. + gotBootstrapObjectRef := gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef + gotBootstrapObject := unstructured.Unstructured{} + gotBootstrapObject.SetKind(gotBootstrapObjectRef.Kind) + gotBootstrapObject.SetAPIVersion(gotBootstrapObjectRef.APIVersion) + + err = env.GetAPIReader().Get(ctx, client.ObjectKey{ + Namespace: gotBootstrapObjectRef.Namespace, + Name: gotBootstrapObjectRef.Name, + }, &gotBootstrapObject) + + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(&gotBootstrapObject).To(EqualObject(wantMachinePoolState.BootstrapObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated)) + + // Check BootstrapObject update. + if currentMachinePoolState != nil && currentMachinePoolState.BootstrapObject != nil { + if tt.wantBootstrapObjectUpdate[gotMachinePool.Name] { + g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).ToNot(Equal(gotBootstrapObject.GetResourceVersion())) + } else { + g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).To(Equal(gotBootstrapObject.GetResourceVersion())) + } + } + + // Compare InfrastructureMachinePoolObject. + gotInfrastructureMachinePoolObjectRef := gotMachinePool.Spec.Template.Spec.InfrastructureRef + gotInfrastructureMachinePoolObject := unstructured.Unstructured{} + gotInfrastructureMachinePoolObject.SetKind(gotInfrastructureMachinePoolObjectRef.Kind) + gotInfrastructureMachinePoolObject.SetAPIVersion(gotInfrastructureMachinePoolObjectRef.APIVersion) + + err = env.GetAPIReader().Get(ctx, client.ObjectKey{ + Namespace: gotInfrastructureMachinePoolObjectRef.Namespace, + Name: gotInfrastructureMachinePoolObjectRef.Name, + }, &gotInfrastructureMachinePoolObject) + + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(&gotInfrastructureMachinePoolObject).To(EqualObject(wantMachinePoolState.InfrastructureMachinePoolObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated)) + + // Check InfrastructureMachinePoolObject update.
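+ // A changed ResourceVersion below means reconcileMachinePools wrote to the object; an unchanged one means the call was a no-op for it.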
+ if currentMachinePoolState != nil && currentMachinePoolState.InfrastructureMachinePoolObject != nil { + if tt.wantInfrastructureMachinePoolObjectUpdate[gotMachinePool.Name] { + g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).ToNot(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion())) + } else { + g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).To(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion())) + } + } + } + } + }) + } +} + // TestReconcileReferencedObjectSequences tests multiple subsequent calls to reconcileReferencedObject // for a control-plane object to verify that the objects are reconciled as expected by tracking managed fields correctly. // NOTE: by extension this test validates managed field handling in mergePatches, and thus its usage in other parts of the @@ -2745,6 +3232,7 @@ func newFakeMachineDeploymentTopologyState(name string, infrastructureMachineTem }). WithClusterName("cluster-1"). WithReplicas(1). + WithMinReadySeconds(1). Build(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(), BootstrapTemplate: bootstrapTemplate.DeepCopy(), @@ -2761,6 +3249,26 @@ func newFakeMachineDeploymentTopologyState(name string, infrastructureMachineTem return mdState } +func newFakeMachinePoolTopologyState(name string, infrastructureMachinePool, bootstrapObject *unstructured.Unstructured) *scope.MachinePoolState { + mpState := &scope.MachinePoolState{ + Object: builder.MachinePool(metav1.NamespaceDefault, name). + WithInfrastructure(infrastructureMachinePool). + WithBootstrap(bootstrapObject). + WithLabels(map[string]string{ + clusterv1.ClusterTopologyMachinePoolNameLabel: name + "-topology", + clusterv1.ClusterTopologyOwnedLabel: "", + }). + WithClusterName("cluster-1"). + WithReplicas(1). + WithMinReadySeconds(1). + Build(), + InfrastructureMachinePoolObject: infrastructureMachinePool.DeepCopy(), + BootstrapObject: bootstrapObject.DeepCopy(), + } + + return mpState +} + func toMachineDeploymentTopologyStateMap(states []*scope.MachineDeploymentState) map[string]*scope.MachineDeploymentState { ret := map[string]*scope.MachineDeploymentState{} for _, state := range states { @@ -2769,6 +3277,14 @@ func toMachineDeploymentTopologyStateMap(states []*scope.MachineDeploymentState) return ret } +func toMachinePoolTopologyStateMap(states []*scope.MachinePoolState) map[string]*scope.MachinePoolState { + ret := map[string]*scope.MachinePoolState{} + for _, state := range states { + ret[state.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]] = state + } + return ret +} + func TestReconciler_reconcileMachineHealthCheck(t *testing.T) { // create a controlPlane object with enough information to be used as an OwnerReference for the MachineHealthCheck. cp := builder.ControlPlane(metav1.NamespaceDefault, "cp1").Build() @@ -2981,6 +3497,37 @@ func prepareMachineDeploymentState(in *scope.MachineDeploymentState, namespace s return s } +// prepareMachinePoolState deep-copies and returns the input scope and sets +// the given namespace to all relevant objects.
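+// Namespaces are only rewritten when they are still metav1.NamespaceDefault; any other value is left as-is.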
+func prepareMachinePoolState(in *scope.MachinePoolState, namespace string) *scope.MachinePoolState { + s := &scope.MachinePoolState{} + if in.BootstrapObject != nil { + s.BootstrapObject = in.BootstrapObject.DeepCopy() + if s.BootstrapObject.GetNamespace() == metav1.NamespaceDefault { + s.BootstrapObject.SetNamespace(namespace) + } + } + if in.InfrastructureMachinePoolObject != nil { + s.InfrastructureMachinePoolObject = in.InfrastructureMachinePoolObject.DeepCopy() + if s.InfrastructureMachinePoolObject.GetNamespace() == metav1.NamespaceDefault { + s.InfrastructureMachinePoolObject.SetNamespace(namespace) + } + } + if in.Object != nil { + s.Object = in.Object.DeepCopy() + if s.Object.GetNamespace() == metav1.NamespaceDefault { + s.Object.SetNamespace(namespace) + } + if s.Object.Spec.Template.Spec.Bootstrap.ConfigRef != nil && s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == metav1.NamespaceDefault { + s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace = namespace + } + if s.Object.Spec.Template.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault { + s.Object.Spec.Template.Spec.InfrastructureRef.Namespace = namespace + } + } + return s +} + // prepareCluster deep-copies and returns the input Cluster and sets // the given namespace to all relevant objects. func prepareCluster(in *clusterv1.Cluster, namespace string) *clusterv1.Cluster { diff --git a/internal/controllers/topology/cluster/suite_test.go b/internal/controllers/topology/cluster/suite_test.go index ce29c90d006d..2c595a67dc87 100644 --- a/internal/controllers/topology/cluster/suite_test.go +++ b/internal/controllers/topology/cluster/suite_test.go @@ -24,6 +24,7 @@ import ( "time" . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -33,6 +34,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/api/v1beta1/index" + "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/internal/controllers/clusterclass" "sigs.k8s.io/cluster-api/internal/test/envtest" @@ -49,6 +51,7 @@ func init() { _ = clusterv1.AddToScheme(fakeScheme) _ = apiextensionsv1.AddToScheme(fakeScheme) _ = expv1.AddToScheme(fakeScheme) + _ = corev1.AddToScheme(fakeScheme) } func TestMain(m *testing.M) { setupIndexes := func(ctx context.Context, mgr ctrl.Manager) { @@ -71,11 +74,40 @@ func TestMain(m *testing.M) { if err != nil { panic(fmt.Sprintf("unable to create unstructuredCachingClient: %v", err)) } + // Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers + // requiring a connection to a remote cluster. + log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker") + secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + tracker, err := remote.NewClusterCacheTracker( + mgr, + remote.ClusterCacheTrackerOptions{ + Log: &log, + SecretCachingClient: secretCachingClient, + }, + ) + if err != nil { + panic(fmt.Sprintf("unable to create cluster cache tracker: %v", err)) + } + if err := (&remote.ClusterCacheReconciler{ + Client: mgr.GetClient(), + Tracker: tracker, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1});
err != nil { + panic(fmt.Sprintf("Failed to start ClusterCacheReconciler: %v", err)) + } if err := (&Reconciler{ Client: mgr.GetClient(), APIReader: mgr.GetAPIReader(), UnstructuredCachingClient: unstructuredCachingClient, - }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil { + Tracker: tracker, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("unable to create topology cluster reconciler: %v", err)) } if err := (&clusterclass.Reconciler{ diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go index 92037392a096..285831a69f81 100644 --- a/internal/test/builder/builders.go +++ b/internal/test/builder/builders.go @@ -1440,15 +1440,16 @@ func (c *TestControlPlaneBuilder) Build() *unstructured.Unstructured { // MachinePoolBuilder holds the variables and objects needed to build a generic MachinePool. type MachinePoolBuilder struct { - namespace string - name string - bootstrap *unstructured.Unstructured - infrastructure *unstructured.Unstructured - version *string - clusterName string - replicas *int32 - labels map[string]string - status *expv1.MachinePoolStatus + namespace string + name string + bootstrap *unstructured.Unstructured + infrastructure *unstructured.Unstructured + version *string + clusterName string + replicas *int32 + labels map[string]string + status *expv1.MachinePoolStatus + minReadySeconds *int32 } // MachinePool creates a MachinePoolBuilder with the given name and namespace. @@ -1501,6 +1502,12 @@ func (m *MachinePoolBuilder) WithStatus(status expv1.MachinePoolStatus) *Machine return m } +// WithMinReadySeconds sets the passed value on the machine pool spec. +func (m *MachinePoolBuilder) WithMinReadySeconds(minReadySeconds int32) *MachinePoolBuilder { + m.minReadySeconds = &minReadySeconds + return m +} + // Build creates a new MachinePool with the variables and objects passed to the MachinePoolBuilder. func (m *MachinePoolBuilder) Build() *expv1.MachinePool { obj := &expv1.MachinePool{ @@ -1514,8 +1521,15 @@ func (m *MachinePoolBuilder) Build() *expv1.MachinePool { Labels: m.labels, }, Spec: expv1.MachinePoolSpec{ - ClusterName: m.clusterName, - Replicas: m.replicas, + ClusterName: m.clusterName, + Replicas: m.replicas, + MinReadySeconds: m.minReadySeconds, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: m.version, + ClusterName: m.clusterName, + }, + }, }, } if m.bootstrap != nil { @@ -1524,9 +1538,6 @@ func (m *MachinePoolBuilder) Build() *expv1.MachinePool { if m.infrastructure != nil { obj.Spec.Template.Spec.InfrastructureRef = *objToRef(m.infrastructure) } - if m.version != nil { - obj.Spec.Template.Spec.Version = m.version - } if m.status != nil { obj.Status = *m.status } @@ -1546,6 +1557,7 @@ type MachineDeploymentBuilder struct { generation *int64 labels map[string]string status *clusterv1.MachineDeploymentStatus + minReadySeconds *int32 } // MachineDeployment creates a MachineDeploymentBuilder with the given name and namespace. @@ -1610,6 +1622,12 @@ func (m *MachineDeploymentBuilder) WithStatus(status clusterv1.MachineDeployment return m } +// WithMinReadySeconds sets the passed value on the machine deployment spec. +func (m *MachineDeploymentBuilder) WithMinReadySeconds(minReadySeconds int32) *MachineDeploymentBuilder { + m.minReadySeconds = &minReadySeconds + return m +} + // Build creates a new MachineDeployment with the variables and objects passed to the MachineDeploymentBuilder. 
func (m *MachineDeploymentBuilder) Build() *clusterv1.MachineDeployment { obj := &clusterv1.MachineDeployment{ @@ -1653,6 +1671,7 @@ func (m *MachineDeploymentBuilder) Build() *clusterv1.MachineDeployment { clusterv1.ClusterNameLabel: m.clusterName, } } + obj.Spec.MinReadySeconds = m.minReadySeconds return obj } diff --git a/internal/test/builder/infrastructure.go b/internal/test/builder/infrastructure.go index 8e0d78e197e5..dfcb91775061 100644 --- a/internal/test/builder/infrastructure.go +++ b/internal/test/builder/infrastructure.go @@ -37,9 +37,13 @@ var ( // GenericInfrastructureMachinePoolTemplateKind is the Kind for the GenericInfrastructureMachinePoolTemplate. GenericInfrastructureMachinePoolTemplateKind = "GenericInfrastructureMachinePoolTemplate" + // GenericInfrastructureMachinePoolTemplateCRD is a generic infrastructure machine pool template CRD. + GenericInfrastructureMachinePoolTemplateCRD = untypedCRD(InfrastructureGroupVersion.WithKind(GenericInfrastructureMachinePoolTemplateKind)) // GenericInfrastructureMachinePoolKind is the Kind for the GenericInfrastructureMachinePool. GenericInfrastructureMachinePoolKind = "GenericInfrastructureMachinePool" + // GenericInfrastructureMachinePoolCRD is a generic infrastructure machine pool CRD. + GenericInfrastructureMachinePoolCRD = untypedCRD(InfrastructureGroupVersion.WithKind(GenericInfrastructureMachinePoolKind)) // GenericInfrastructureClusterKind is the kind for the GenericInfrastructureCluster type. GenericInfrastructureClusterKind = "GenericInfrastructureCluster" @@ -70,9 +74,13 @@ var ( // TestInfrastructureMachinePoolTemplateKind is the kind for the TestInfrastructureMachinePoolTemplate type. TestInfrastructureMachinePoolTemplateKind = "TestInfrastructureMachinePoolTemplate" + // TestInfrastructureMachinePoolTemplateCRD is a test infrastructure machine pool template CRD. + TestInfrastructureMachinePoolTemplateCRD = testInfrastructureMachinePoolTemplateCRD(InfrastructureGroupVersion.WithKind(TestInfrastructureMachinePoolTemplateKind)) // TestInfrastructureMachinePoolKind is the kind for the TestInfrastructureMachinePool type. TestInfrastructureMachinePoolKind = "TestInfrastructureMachinePool" + // TestInfrastructureMachinePoolCRD is a test infrastructure machine pool CRD. + TestInfrastructureMachinePoolCRD = testInfrastructureMachinePoolCRD(InfrastructureGroupVersion.WithKind(TestInfrastructureMachinePoolKind)) + // TestInfrastructureMachineKind is the kind for the TestInfrastructureMachine type. TestInfrastructureMachineKind = "TestInfrastructureMachine" @@ -147,6 +155,29 @@ func testInfrastructureMachineTemplateCRD(gvk schema.GroupVersionKind) *apiexten }) } +func testInfrastructureMachinePoolTemplateCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefinition { + return generateCRD(gvk, map[string]apiextensionsv1.JSONSchemaProps{ + "metadata": { + // NOTE: in CRD there is only a partial definition of metadata schema.
+ // Ref https://github.com/kubernetes-sigs/controller-tools/blob/59485af1c1f6a664655dad49543c474bb4a0d2a2/pkg/crd/gen.go#L185 + Type: "object", + }, + "spec": { + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + // Mandatory field from the Cluster API contract + "template": { + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "metadata": metadataSchema, + "spec": machinePoolSpecSchema, + }, + }, + }, + }, + }) +} + func testInfrastructureMachineCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefinition { return generateCRD(gvk, map[string]apiextensionsv1.JSONSchemaProps{ "metadata": { @@ -168,6 +199,27 @@ func testInfrastructureMachineCRD(gvk schema.GroupVersionKind) *apiextensionsv1. }) } +func testInfrastructureMachinePoolCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefinition { + return generateCRD(gvk, map[string]apiextensionsv1.JSONSchemaProps{ + "metadata": { + // NOTE: in CRD there is only a partial definition of metadata schema. + // Ref https://github.com/kubernetes-sigs/controller-tools/blob/59485af1c1f6a664655dad49543c474bb4a0d2a2/pkg/crd/gen.go#L185 + Type: "object", + }, + "spec": machinePoolSpecSchema, + "status": { + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + // Mandatory field from the Cluster API contract + "ready": {Type: "boolean"}, + // General purpose fields to be used in different test scenarios. + "foo": {Type: "string"}, + "bar": {Type: "string"}, + }, + }, + }) +} + var ( clusterSpecSchema = apiextensionsv1.JSONSchemaProps{ Type: "object", @@ -215,4 +267,22 @@ var ( "bar": {Type: "string"}, }, } + + machinePoolSpecSchema = apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + // Mandatory field from the Cluster API contract + "providerIDList": { + Type: "array", + Items: &apiextensionsv1.JSONSchemaPropsOrArray{ + Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + // General purpose fields to be used in different test scenarios. + "foo": {Type: "string"}, + "bar": {Type: "string"}, + }, + } ) diff --git a/internal/test/builder/zz_generated.deepcopy.go b/internal/test/builder/zz_generated.deepcopy.go index c6a1e986ca36..0d7704823008 100644 --- a/internal/test/builder/zz_generated.deepcopy.go +++ b/internal/test/builder/zz_generated.deepcopy.go @@ -438,6 +438,11 @@ func (in *MachineDeploymentBuilder) DeepCopyInto(out *MachineDeploymentBuilder) *out = new(v1beta1.MachineDeploymentStatus) (*in).DeepCopyInto(*out) } + if in.minReadySeconds != nil { + in, out := &in.minReadySeconds, &out.minReadySeconds + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentBuilder. @@ -620,6 +625,11 @@ func (in *MachinePoolBuilder) DeepCopyInto(out *MachinePoolBuilder) { *out = new(apiv1beta1.MachinePoolStatus) (*in).DeepCopyInto(*out) } + if in.minReadySeconds != nil { + in, out := &in.minReadySeconds, &out.minReadySeconds + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolBuilder.
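For illustration, a minimal sketch (not part of the diff; the object name and provider ID are made up) of how a test could populate the contract fields that machinePoolSpecSchema declares above:

	// Build a TestInfrastructureMachinePool and set the fields the schema allows.
	obj := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infra-mp").Build()
	// spec.providerIDList is the mandatory Cluster API contract field.
	g.Expect(unstructured.SetNestedStringSlice(obj.Object, []string{"test:////instance-0"}, "spec", "providerIDList")).To(Succeed())
	// status.ready is the mandatory contract field reporting provisioning state.
	g.Expect(unstructured.SetNestedField(obj.Object, true, "status", "ready")).To(Succeed())
	// "foo" is one of the general-purpose test fields in the schema.
	g.Expect(unstructured.SetNestedField(obj.Object, "bar", "spec", "foo")).To(Succeed())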
diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go index e6028ce1f0ca..1d8668d1d59e 100644 --- a/internal/test/envtest/environment.go +++ b/internal/test/envtest/environment.go @@ -212,6 +212,8 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment { builder.GenericControlPlaneTemplateCRD.DeepCopy(), builder.GenericInfrastructureMachineCRD.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD.DeepCopy(), + builder.GenericInfrastructureMachinePoolCRD.DeepCopy(), + builder.GenericInfrastructureMachinePoolTemplateCRD.DeepCopy(), builder.GenericInfrastructureClusterCRD.DeepCopy(), builder.GenericInfrastructureClusterTemplateCRD.DeepCopy(), builder.GenericRemediationCRD.DeepCopy(), @@ -219,6 +221,8 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment { builder.TestInfrastructureClusterTemplateCRD.DeepCopy(), builder.TestInfrastructureClusterCRD.DeepCopy(), builder.TestInfrastructureMachineTemplateCRD.DeepCopy(), + builder.TestInfrastructureMachinePoolCRD.DeepCopy(), + builder.TestInfrastructureMachinePoolTemplateCRD.DeepCopy(), builder.TestInfrastructureMachineCRD.DeepCopy(), builder.TestBootstrapConfigTemplateCRD.DeepCopy(), builder.TestBootstrapConfigCRD.DeepCopy(),
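As a usage note, the new WithMinReadySeconds option composes with the existing MachinePool builder as sketched below (values are arbitrary; only the builder methods shown in the diff are used):

	// Build a MachinePool fixture; per the Build change above, the template's
	// clusterName is now always populated (and version, when set via WithVersion).
	mp := builder.MachinePool(metav1.NamespaceDefault, "mp-1").
		WithClusterName("cluster-1").
		WithReplicas(3).
		WithMinReadySeconds(30).
		Build()
	// spec.minReadySeconds carries the configured value as a *int32.
	fmt.Println(*mp.Spec.MinReadySeconds) // 30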