Commit

Fixed error

willie-yao committed Sep 14, 2023
1 parent a3cbd60 commit cdf0877
Showing 3 changed files with 48 additions and 5 deletions.
1 change: 1 addition & 0 deletions controllers/remote/cluster_cache_tracker_fake.go
@@ -30,6 +30,7 @@ func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, scheme *runti
 		client:           cl,
 		scheme:           scheme,
 		clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
+		clusterLock:      newKeyedMutex(),
 	}
 
 	testCacheTracker.clusterAccessors[objKey] = &clusterAccessor{
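
Why this one-liner matters: NewTestClusterCacheTracker previously left the tracker's clusterLock field nil, so any code path that took the per-cluster lock would panic. For orientation, here is a minimal sketch of the shape a keyed mutex like the one newKeyedMutex() returns might have; the real implementation lives in controllers/remote, and all names and details below are assumptions rather than the actual code:

package remote

import (
	"sync"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// keyedMutex serializes work per cluster key while letting different
// clusters proceed in parallel. Sketch only; names are assumptions.
type keyedMutex struct {
	locksMtx sync.Mutex                       // guards the locks map itself
	locks    map[client.ObjectKey]*sync.Mutex // one mutex per cluster
}

func newKeyedMutex() *keyedMutex {
	return &keyedMutex{locks: make(map[client.ObjectKey]*sync.Mutex)}
}

// Lock acquires the mutex for key, lazily creating it on first use.
func (k *keyedMutex) Lock(key client.ObjectKey) {
	k.locksMtx.Lock()
	l, ok := k.locks[key]
	if !ok {
		l = &sync.Mutex{}
		k.locks[key] = l
	}
	k.locksMtx.Unlock()
	l.Lock()
}

// Unlock releases the mutex for key.
func (k *keyedMutex) Unlock(key client.ObjectKey) {
	k.locksMtx.Lock()
	l, ok := k.locks[key]
	k.locksMtx.Unlock()
	if ok {
		l.Unlock()
	}
}

The point of keying the mutex is that reconciles for different clusters can run concurrently while accesses to the same cluster's accessor stay serialized.
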
19 changes: 16 additions & 3 deletions internal/controllers/topology/cluster/cluster_controller_test.go
@@ -47,6 +47,7 @@ import (
 	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/cluster-api/util/patch"
 )

@@ -217,9 +218,10 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
 	g.Expect(err).ToNot(HaveOccurred())
 	clusterWithTopologyChange := actualCluster.DeepCopy()
 	clusterWithTopologyChange.Spec.Topology.Workers.MachineDeployments[0].Replicas = &replicas
+	clusterWithTopologyChange.Spec.Topology.Workers.MachinePools[0].Replicas = &replicas
 	g.Expect(patchHelper.Patch(ctx, clusterWithTopologyChange)).Should(Succeed())
 
-	// Check to ensure all objects are correctly reconciled with the new MachineDeployment replica count in Topology.
+	// Check to ensure all objects are correctly reconciled with the new MachineDeployment and MachinePool replica count in Topology.
 	g.Eventually(func(g Gomega) error {
 		// Get the cluster object.
 		updatedCluster := &clusterv1.Cluster{}
@@ -230,6 +232,9 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
 		// Check to ensure the replica count has been successfully updated in the API server and cache.
 		g.Expect(updatedCluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(&replicas))
 
+		// Check to ensure the replica count has been successfully updated in the API server and cache.
+		g.Expect(updatedCluster.Spec.Topology.Workers.MachinePools[0].Replicas).To(Equal(&replicas))
+
 		// Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations.
 		g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed())
 
@@ -243,7 +248,7 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
 		g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed())
 
 		// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
-		g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())
+		g.Expect(assertMachinePoolsReconcile(updatedCluster)).Should(Succeed())
 
 		// Check if the Cluster has the relevant TopologyReconciledCondition.
 		g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed())
@@ -751,7 +756,7 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
 		Build()
 	infrastructureMachinePoolTemplate1 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName1).Build()
 	infrastructureMachinePoolTemplate2 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName2).
-		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
+		WithSpecFields(map[string]interface{}{"spec.template.fakeSetting": true}).
 		Build()
 	infrastructureClusterTemplate1 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate1").
 		Build()
@@ -855,6 +860,12 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
 			Build()).
 		Build()
 
+	cluster1Secret := kubeconfig.GenerateSecret(cluster1, kubeconfig.FromEnvTestConfig(env.Config, cluster1))
+	cluster2Secret := kubeconfig.GenerateSecret(cluster2, kubeconfig.FromEnvTestConfig(env.Config, cluster2))
+	// Unset the ownerrefs otherwise they are invalid because they contain an empty uid.
+	cluster1Secret.ObjectMeta.OwnerReferences = nil
+	cluster2Secret.ObjectMeta.OwnerReferences = nil
+
 	// Create a set of setupTestEnvForIntegrationTests from the objects above to add to the API server when the test environment starts.
 	// The objects are created for every test, though some e.g. infrastructureMachineTemplate2 may not be used in every test.
 	initObjs := []client.Object{
@@ -870,6 +881,8 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
 		clusterClassForRebase,
 		cluster1,
 		cluster2,
+		cluster1Secret,
+		cluster2Secret,
 	}
 	cleanup := func() error {
 		// Delete Objects in reverse, because we cannot delete a ClusterCLass if it is still used by a Cluster.
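
The kubeconfig Secrets added above are needed because the suite now runs a real ClusterCacheTracker (see suite_test.go below): the tracker builds clients for workload clusters by reading the <cluster-name>-kubeconfig Secret in the cluster's namespace. kubeconfig.FromEnvTestConfig serializes the envtest rest.Config into kubeconfig bytes, and kubeconfig.GenerateSecret wraps them in that Secret, including an ownerReference back to the Cluster; since the Cluster has not been created yet at this point, the reference carries an empty uid and the API server would reject it, hence the OwnerReferences = nil lines. A hedged sketch of the same pattern in isolation (a fragment: env.Config, the namespace, and the initObjs target are assumptions taken from the surrounding test code):

// Sketch: make an envtest "workload" cluster reachable for a ClusterCacheTracker.
cluster := &clusterv1.Cluster{
	ObjectMeta: metav1.ObjectMeta{Name: "cluster1", Namespace: ns.Name}, // illustrative
}
kubeconfigBytes := kubeconfig.FromEnvTestConfig(env.Config, cluster) // envtest config -> kubeconfig bytes
secret := kubeconfig.GenerateSecret(cluster, kubeconfigBytes)        // the "cluster1-kubeconfig" Secret
secret.ObjectMeta.OwnerReferences = nil // owner uid is empty before the Cluster exists; clear to pass validation
initObjs := []client.Object{cluster, secret} // create both when the test environment starts
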
33 changes: 31 additions & 2 deletions internal/controllers/topology/cluster/suite_test.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -50,6 +51,7 @@ func init() {
 	_ = clusterv1.AddToScheme(fakeScheme)
 	_ = apiextensionsv1.AddToScheme(fakeScheme)
 	_ = expv1.AddToScheme(fakeScheme)
+	_ = corev1.AddToScheme(fakeScheme)
 }
 func TestMain(m *testing.M) {
 	setupIndexes := func(ctx context.Context, mgr ctrl.Manager) {
@@ -63,6 +65,9 @@ func TestMain(m *testing.M) {
 		}
 	}
 	setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) {
+		// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
+		// requiring a connection to a remote cluster
+		log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
 		unstructuredCachingClient, err := client.New(mgr.GetConfig(), client.Options{
 			Cache: &client.CacheOptions{
 				Reader: mgr.GetCache(),
@@ -72,13 +77,37 @@ func TestMain(m *testing.M) {
 		if err != nil {
 			panic(fmt.Sprintf("unable to create unstructuredCachineClient: %v", err))
 		}
-		tracker := remote.NewTestClusterCacheTracker(mgr.GetLogger(), mgr.GetClient(), mgr.GetScheme(), client.ObjectKey{Name: "test-cluster"}, "test-namespace/test-object")
+		secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
+			HTTPClient: mgr.GetHTTPClient(),
+			Cache: &client.CacheOptions{
+				Reader: mgr.GetCache(),
+			},
+		})
+		if err != nil {
+			panic(fmt.Sprintf("unable to create secretCachingClient: %v", err))
+		}
+		tracker, err := remote.NewClusterCacheTracker(
+			mgr,
+			remote.ClusterCacheTrackerOptions{
+				Log:                 &log,
+				SecretCachingClient: secretCachingClient,
+			},
+		)
+		if err != nil {
+			panic(fmt.Sprintf("unable to create cluster cache tracker: %v", err))
+		}
+		if err := (&remote.ClusterCacheReconciler{
+			Client:  mgr.GetClient(),
+			Tracker: tracker,
+		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+			panic(fmt.Sprintf("Failed to start ClusterCacheReconciler: %v", err))
+		}
 		if err := (&Reconciler{
 			Client:                    mgr.GetClient(),
 			APIReader:                 mgr.GetAPIReader(),
 			UnstructuredCachingClient: unstructuredCachingClient,
 			Tracker:                   tracker,
-		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil {
+		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 			panic(fmt.Sprintf("unable to create topology cluster reconciler: %v", err))
 		}
 		if err := (&clusterclass.Reconciler{
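
Net effect of this file's changes: the suite replaces the fake tracker with a real ClusterCacheTracker fed by a dedicated secret-caching client (so kubeconfig Secret reads are served from the manager's cache), registers a ClusterCacheReconciler to tear down accessors for deleted clusters, and lowers the topology reconciler from MaxConcurrentReconciles: 5 to 1. For context, a hedged sketch of how a controller typically consumes the tracker — GetClient is the tracker's real accessor method, while the surrounding reconciler plumbing (r.Tracker, cluster) is illustrative:

// Inside a Reconcile method, with r.Tracker a *remote.ClusterCacheTracker and
// `cluster` the Cluster being reconciled (both assumptions for this sketch).
remoteClient, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(cluster))
if err != nil {
	// The accessor may not exist yet (e.g. the kubeconfig Secret is missing).
	return ctrl.Result{}, err
}
// Read from the workload cluster through the cached remote client.
nodes := &corev1.NodeList{}
if err := remoteClient.List(ctx, nodes); err != nil {
	return ctrl.Result{}, err
}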
