From 45d7ce480f413fb5a481fe49f2284724473ea5a2 Mon Sep 17 00:00:00 2001 From: Matous Jobanek Date: Tue, 27 Aug 2024 12:08:38 +0200 Subject: [PATCH] add new separateKustomizeComponent and skipMembers fields (#67) --- pkg/assets/sandbox_config.go | 16 +- pkg/cmd/generate/admin-manifests.go | 31 +++- pkg/cmd/generate/admin-manifests_test.go | 111 ++++++++----- pkg/cmd/generate/assertion_test.go | 14 ++ pkg/cmd/generate/cluster.go | 10 +- pkg/cmd/generate/cluster_test.go | 72 ++++++++ pkg/cmd/generate/mock_test.go | 16 +- pkg/cmd/generate/util.go | 3 + pkg/cmd/generate/util_test.go | 156 ++++++++++-------- pkg/test/environment_config.go | 42 ++++- .../kubesaw-admins.yaml | 16 ++ 11 files changed, 362 insertions(+), 125 deletions(-) diff --git a/pkg/assets/sandbox_config.go b/pkg/assets/sandbox_config.go index 49bb222..556acb2 100644 --- a/pkg/assets/sandbox_config.go +++ b/pkg/assets/sandbox_config.go @@ -14,6 +14,10 @@ type Clusters struct { type MemberCluster struct { Name string `yaml:"name"` ClusterConfig `yaml:",inline"` + // SeparateKustomizeComponent when set to true, then the manifests for the member cluster will be generated in a separate + // Kustomize component (a directory structure that will contain all the generated manifests including the kustomization.yaml files). + // The name of the root folder will have the same name as the name of the member cluster. + SeparateKustomizeComponent bool `yaml:"separateKustomizeComponent"` } type ClusterConfig struct { @@ -21,16 +25,24 @@ type ClusterConfig struct { } type ServiceAccount struct { - Name string `yaml:"name"` - Namespace string `yaml:"namespace,omitempty"` + Name string `yaml:"name"` + Namespace string `yaml:"namespace,omitempty"` + Selector Selector `yaml:"selector"` PermissionsPerClusterType `yaml:",inline"` } +// Selector contains fields to select clusters the entity should (not) be applied for +type Selector struct { + // SkipMembers can contain a list of member cluster names the entity shouldn't be applied for + SkipMembers []string `yaml:"skipMembers,omitempty"` +} + type User struct { Name string `yaml:"name"` ID []string `yaml:"id"` AllClusters bool `yaml:"allClusters,omitempty"` // force user and identity manifest generation on all clusters, even if no permissions are specified Groups []string `yaml:"groups"` + Selector Selector `yaml:"selector"` PermissionsPerClusterType `yaml:",inline,omitempty"` } diff --git a/pkg/cmd/generate/admin-manifests.go b/pkg/cmd/generate/admin-manifests.go index 0e19ff9..aee7893 100644 --- a/pkg/cmd/generate/admin-manifests.go +++ b/pkg/cmd/generate/admin-manifests.go @@ -1,6 +1,7 @@ package generate import ( + "fmt" "os" "path/filepath" @@ -35,7 +36,7 @@ ksctl generate admin-manifests ./path/to/kubesaw-stage.openshiftapps.com/kubesaw } command.Flags().StringVarP(&f.kubeSawAdminsFile, "kubesaw-admins", "c", "", "Use the given kubesaw-admin file") command.Flags().StringVarP(&f.outDir, "out-dir", "o", "", "Directory where generated manifests should be stored") - command.Flags().BoolVarP(&f.singleCluster, "single-cluster", "s", false, "If host and member are deployed to the same cluster") + command.Flags().BoolVarP(&f.singleCluster, "single-cluster", "s", false, "If host and member are deployed to the same cluster. 
Cannot be used with separateKustomizeComponent set in one of the members.") command.Flags().StringVar(&f.hostRootDir, "host-root-dir", "host", "The root directory name for host manifests") command.Flags().StringVar(&f.memberRootDir, "member-root-dir", "member", "The root directory name for member manifests") @@ -60,6 +61,13 @@ func adminManifests(term ioutils.Terminal, files assets.FS, flags adminManifests if err != nil { return errs.Wrapf(err, "unable get kubesaw-admins.yaml file from %s", flags.kubeSawAdminsFile) } + if flags.singleCluster { + for _, memberCluster := range kubeSawAdmins.Clusters.Members { + if memberCluster.SeparateKustomizeComponent { + return fmt.Errorf("--single-cluster flag cannot be used with separateKustomizeComponent set in one of the members (%s)", memberCluster.Name) + } + } + } err = os.RemoveAll(flags.outDir) if err != nil { return err @@ -71,12 +79,20 @@ func adminManifests(term ioutils.Terminal, files assets.FS, flags adminManifests files: files, } objsCache := objectsCache{} - if err := ensureCluster(ctx, configuration.Host, objsCache); err != nil { + if err := ensureCluster(ctx, configuration.Host, objsCache, ""); err != nil { return err } - if err := ensureCluster(ctx, configuration.Member, objsCache); err != nil { + if err := ensureCluster(ctx, configuration.Member, objsCache, ""); err != nil { return err } + + for _, memberCluster := range kubeSawAdmins.Clusters.Members { + if memberCluster.SeparateKustomizeComponent { + if err := ensureCluster(ctx, configuration.Member, objsCache, memberCluster.Name); err != nil { + return err + } + } + } return objsCache.writeManifests(ctx) } @@ -87,12 +103,17 @@ type adminManifestsContext struct { files assets.FS } -func ensureCluster(ctx *adminManifestsContext, clusterType configuration.ClusterType, cache objectsCache) error { - ctx.PrintContextSeparatorf("Generating manifests for %s cluster type", clusterType) +func ensureCluster(ctx *adminManifestsContext, clusterType configuration.ClusterType, cache objectsCache, specificKMemberName string) error { + if specificKMemberName == "" { + ctx.PrintContextSeparatorf("Generating manifests for %s cluster type", clusterType) + } else { + ctx.PrintContextSeparatorf("Generating manifests for %s cluster type in the separate Kustomize component: %s", clusterType, specificKMemberName) + } clusterCtx := &clusterContext{ adminManifestsContext: ctx, clusterType: clusterType, + specificKMemberName: specificKMemberName, } if err := ensureServiceAccounts(clusterCtx, cache); err != nil { diff --git a/pkg/cmd/generate/admin-manifests_test.go b/pkg/cmd/generate/admin-manifests_test.go index 60b416d..744d1a6 100644 --- a/pkg/cmd/generate/admin-manifests_test.go +++ b/pkg/cmd/generate/admin-manifests_test.go @@ -26,18 +26,20 @@ func TestAdminManifests(t *testing.T) { kubeSawAdmins := NewKubeSawAdmins( Clusters(HostServerAPI). AddMember("member1", Member1ServerAPI). - AddMember("member2", Member2ServerAPI), + AddMember("member2", Member2ServerAPI, WithSeparateKustomizeComponent()), ServiceAccounts( Sa("john", "", HostRoleBindings("toolchain-host-operator", Role("install-operator"), ClusterRole("admin")), - MemberRoleBindings("toolchain-member-operator", Role("install-operator"), ClusterRole("admin"))), + MemberRoleBindings("toolchain-member-operator", Role("install-operator"), ClusterRole("admin"))). 
+ WithSkippedMembers("member2"), Sa("bob", "", HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("edit")), MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("edit")))), Users( User("john-user", []string{"12345"}, false, "crtadmins-view", HostRoleBindings("toolchain-host-operator", Role("register-cluster"), ClusterRole("edit")), - MemberRoleBindings("toolchain-member-operator", Role("register-cluster"), ClusterRole("edit"))), + MemberRoleBindings("toolchain-member-operator", Role("register-cluster"), ClusterRole("edit"))). + WithSkippedMembers("member2"), User("bob-crtadmin", []string{"67890"}, false, "crtadmins-exec", HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("admin")), MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("admin"))))) @@ -65,19 +67,43 @@ func TestAdminManifests(t *testing.T) { }) t.Run("in single-cluster mode", func(t *testing.T) { - // given - outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-") - require.NoError(t, err) - term := NewFakeTerminalWithResponse("Y") - term.Tee(os.Stdout) - flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster()) - - // when - err = adminManifests(term, files, flags) - - // then - require.NoError(t, err) - verifyFiles(t, flags) + t.Run("fails with separateKustomizeComponent set for member2", func(t *testing.T) { + // given + outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-") + require.NoError(t, err) + term := NewFakeTerminalWithResponse("Y") + term.Tee(os.Stdout) + flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster()) + + // when + err = adminManifests(term, files, flags) + + // then + require.EqualError(t, err, "--single-cluster flag cannot be used with separateKustomizeComponent set in one of the members (member2)") + }) + + t.Run("without separateKustomizeComponent set for member2", func(t *testing.T) { + // given + kubeSawAdmins.Clusters.Members[1].SeparateKustomizeComponent = false + kubeSawAdminsContent, err := yaml.Marshal(kubeSawAdmins) + require.NoError(t, err) + + configFile := createKubeSawAdminsFile(t, "kubesaw.host.openshiftapps.com", kubeSawAdminsContent) + files := newDefaultFiles(t) + + outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-") + require.NoError(t, err) + term := NewFakeTerminalWithResponse("Y") + term.Tee(os.Stdout) + flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster()) + + // when + err = adminManifests(term, files, flags) + + // then + require.NoError(t, err) + verifyFiles(t, flags) + }) }) t.Run("in custom host root directory", func(t *testing.T) { @@ -183,14 +209,8 @@ func storeDummySA(t *testing.T, outDir string) { func verifyFiles(t *testing.T, flags adminManifestsFlags) { dirEntries, err := os.ReadDir(flags.outDir) require.NoError(t, err) - var dirNames []string - if !flags.singleCluster { - assert.Len(t, dirEntries, 2) - dirNames = []string{dirEntries[0].Name(), dirEntries[1].Name()} - } else { - assert.Len(t, dirEntries, 3) - dirNames = []string{dirEntries[0].Name(), dirEntries[1].Name(), dirEntries[2].Name()} - } + assert.Len(t, dirEntries, 3) + dirNames := []string{dirEntries[0].Name(), dirEntries[1].Name(), dirEntries[2].Name()} for _, clusterType := range configuration.ClusterTypes { ns := commontest.HostOperatorNs @@ -203,16 +223,24 @@ func verifyFiles(t *testing.T, flags 
adminManifestsFlags) { verifyServiceAccounts(t, flags.outDir, expectedRootDir, clusterType, ns) verifyUsers(t, flags.outDir, expectedRootDir, clusterType, ns, flags.singleCluster) } + + if !flags.singleCluster { + // if singleCluster is not used then let's verify that member2 was generated in a separate kustomize component + verifyServiceAccounts(t, flags.outDir, "member2", configuration.Member, commontest.MemberOperatorNs) + verifyUsers(t, flags.outDir, "member2", configuration.Member, commontest.MemberOperatorNs, flags.singleCluster) + } } func verifyServiceAccounts(t *testing.T, outDir, expectedRootDir string, clusterType configuration.ClusterType, roleNs string) { saNs := fmt.Sprintf("sandbox-sre-%s", clusterType) - inKStructure(t, outDir, expectedRootDir). - assertSa(saNs, "john"). - hasRole(roleNs, clusterType.AsSuffix("install-operator"), clusterType.AsSuffix("install-operator-john")). - hasNsClusterRole(roleNs, "admin", clusterType.AsSuffix("clusterrole-admin-john")) - + if expectedRootDir != "member2" { + // john is skipped for member2 (when generated as a separate kustomize component) + inKStructure(t, outDir, expectedRootDir). + assertSa(saNs, "john"). + hasRole(roleNs, clusterType.AsSuffix("install-operator"), clusterType.AsSuffix("install-operator-john")). + hasNsClusterRole(roleNs, "admin", clusterType.AsSuffix("clusterrole-admin-john")) + } inKStructure(t, outDir, expectedRootDir). assertSa(saNs, "bob"). hasRole(roleNs, clusterType.AsSuffix("restart-deployment"), clusterType.AsSuffix("restart-deployment-bob")). @@ -225,20 +253,27 @@ func verifyUsers(t *testing.T, outDir, expectedRootDir string, clusterType confi rootDir = "base" } - inKStructure(t, outDir, rootDir). - assertUser("john-user"). - hasIdentity("12345"). - belongsToGroups(groups("crtadmins-view"), extraGroupsUserIsNotPartOf("crtadmins-exec")) - storageAssertion := inKStructure(t, outDir, expectedRootDir).storageAssertionImpl - newPermissionAssertion(storageAssertion, "", "john-user", "User"). - hasRole(ns, clusterType.AsSuffix("register-cluster"), clusterType.AsSuffix("register-cluster-john-user")). - hasNsClusterRole(ns, "edit", clusterType.AsSuffix("clusterrole-edit-john-user")) + bobsExtraGroupsUserIsNotPartOf := extraGroupsUserIsNotPartOf() + if expectedRootDir != "member2" { + // john is skipped for member2 (when generated as a separate kustomize component) + inKStructure(t, outDir, rootDir). + assertUser("john-user"). + hasIdentity("12345"). + belongsToGroups(groups("crtadmins-view"), extraGroupsUserIsNotPartOf("crtadmins-exec")) + + newPermissionAssertion(storageAssertion, "", "john-user", "User"). + hasRole(ns, clusterType.AsSuffix("register-cluster"), clusterType.AsSuffix("register-cluster-john-user")). + hasNsClusterRole(ns, "edit", clusterType.AsSuffix("clusterrole-edit-john-user")) + + // crtadmins-view group is not generated for member2 at all + bobsExtraGroupsUserIsNotPartOf = extraGroupsUserIsNotPartOf("crtadmins-view") + } inKStructure(t, outDir, rootDir). assertUser("bob-crtadmin"). hasIdentity("67890"). - belongsToGroups(groups("crtadmins-exec"), extraGroupsUserIsNotPartOf("crtadmins-view")) + belongsToGroups(groups("crtadmins-exec"), bobsExtraGroupsUserIsNotPartOf) newPermissionAssertion(storageAssertion, "", "bob-crtadmin", "User"). hasRole(ns, clusterType.AsSuffix("restart-deployment"), clusterType.AsSuffix("restart-deployment-bob-crtadmin")). 
diff --git a/pkg/cmd/generate/assertion_test.go b/pkg/cmd/generate/assertion_test.go index 90dc547..ac4bfa6 100644 --- a/pkg/cmd/generate/assertion_test.go +++ b/pkg/cmd/generate/assertion_test.go @@ -443,3 +443,17 @@ func (a *objectsCacheAssertion) assertNumberOfRoles(expectedNumber int) *objects assert.Len(a.t, roles, expectedNumber) return a } + +func (a *objectsCacheAssertion) assertNumberOfSAs(expectedNumber int) *objectsCacheAssertion { + roles, err := a.listObjects("serviceaccounts", "ServiceAccount", &corev1.ServiceAccount{}) + require.NoError(a.t, err) + assert.Len(a.t, roles, expectedNumber) + return a +} + +func (a *objectsCacheAssertion) assertNumberOfUsers(expectedNumber int) *objectsCacheAssertion { + roles, err := a.listObjects("users", "User", &userv1.User{}) + require.NoError(a.t, err) + assert.Len(a.t, roles, expectedNumber) + return a +} diff --git a/pkg/cmd/generate/cluster.go b/pkg/cmd/generate/cluster.go index c3c6f10..96fbced 100644 --- a/pkg/cmd/generate/cluster.go +++ b/pkg/cmd/generate/cluster.go @@ -2,11 +2,13 @@ package generate import ( "github.com/kubesaw/ksctl/pkg/configuration" + "k8s.io/utils/strings/slices" ) type clusterContext struct { *adminManifestsContext - clusterType configuration.ClusterType + clusterType configuration.ClusterType + specificKMemberName string } // ensureServiceAccounts reads the list of service accounts definitions and it's permissions. @@ -14,6 +16,9 @@ type clusterContext struct { func ensureServiceAccounts(ctx *clusterContext, objsCache objectsCache) error { ctx.Printlnf("-> Ensuring ServiceAccounts and its RoleBindings...") for _, sa := range ctx.kubeSawAdmins.ServiceAccounts { + if ctx.specificKMemberName != "" && slices.Contains(sa.Selector.SkipMembers, ctx.specificKMemberName) { + continue + } // by default, it should use the sandbox sre namespace. let's keep this empty (if the target namespace is not defined) so it is recognized in the ensureServiceAccount method based on the cluster type it is being applied in saNamespace := "" @@ -42,6 +47,9 @@ func ensureUsers(ctx *clusterContext, objsCache objectsCache) error { ctx.Printlnf("-> Ensuring Users and its RoleBindings...") for _, user := range ctx.kubeSawAdmins.Users { + if ctx.specificKMemberName != "" && slices.Contains(user.Selector.SkipMembers, ctx.specificKMemberName) { + continue + } m := &permissionsManager{ objectsCache: objsCache, createSubject: ensureUserIdentityAndGroups(user.ID, user.Groups), diff --git a/pkg/cmd/generate/cluster_test.go b/pkg/cmd/generate/cluster_test.go index 9893bdc..336808e 100644 --- a/pkg/cmd/generate/cluster_test.go +++ b/pkg/cmd/generate/cluster_test.go @@ -90,6 +90,39 @@ func TestEnsureServiceAccounts(t *testing.T) { hasNsClusterRole(commontest.HostOperatorNs, "view", "clusterrole-view-john-host"). hasClusterRoleBinding("cluster-monitoring-view", "clusterrole-cluster-monitoring-view-john-host") }) + + t.Run("skip SA in a member with separateKustomizeComponent set", func(t *testing.T) { + // given + kubeSawAdmins := NewKubeSawAdmins( + Clusters(HostServerAPI).AddMember("member-1", Member1ServerAPI, WithSeparateKustomizeComponent()), + ServiceAccounts( + Sa("john", "", + permissionsForAllNamespaces...).WithSkippedMembers("member-1"), // will be skipped for the member + Sa("bob", "", + HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("view")), + MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("view"))). 
+ WithSkippedMembers("wrong-member")), // doesn't have any effect on filtering + []assets.User{}) + ctx := newAdminManifestsContextWithDefaultFiles(t, kubeSawAdmins) + clusterCtx := newFakeClusterContext(ctx, configuration.Member, withSpecificKMemberName("member-1")) + t.Cleanup(gock.OffAll) + cache := objectsCache{} + + // when + err := ensureServiceAccounts(clusterCtx, cache) + + // then + require.NoError(t, err) + + inObjectCache(t, ctx.outDir, "member-1", cache). + assertNumberOfSAs(1). + assertNumberOfRoles(1) + + inObjectCache(t, ctx.outDir, "member-1", cache). + assertSa("sandbox-sre-member", "bob"). + hasRole("toolchain-member-operator", configuration.Member.AsSuffix("restart-deployment"), configuration.Member.AsSuffix("restart-deployment-bob")). + hasNsClusterRole("toolchain-member-operator", "view", configuration.Member.AsSuffix("clusterrole-view-bob")) + }) } func TestUsers(t *testing.T) { @@ -158,6 +191,45 @@ func TestUsers(t *testing.T) { }) } }) + + t.Run("skip User in a member with separateKustomizeComponent set", func(t *testing.T) { + // given + kubeSawAdmins := NewKubeSawAdmins( + Clusters(HostServerAPI).AddMember("member-1", Member1ServerAPI, WithSeparateKustomizeComponent()), + ServiceAccounts(), + Users( + User("john-user", []string{"12345"}, false, "crtadmins", + permissionsForAllNamespaces...).WithSkippedMembers("member-1"), // will be skipped for the member + User("bob-crtadmin", []string{"67890"}, false, "crtadmins", + HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("view")), + MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("view")), + MemberClusterRoleBindings("cluster-monitoring-view")). + WithSkippedMembers("wrong-member")), // doesn't have any effect on filtering + ) + ctx := newAdminManifestsContextWithDefaultFiles(t, kubeSawAdmins) + clusterCtx := newFakeClusterContext(ctx, configuration.Member, withSpecificKMemberName("member-1")) + t.Cleanup(gock.OffAll) + cache := objectsCache{} + + // when + err := ensureUsers(clusterCtx, cache) + + // then + require.NoError(t, err) + + inObjectCache(t, ctx.outDir, "member-1", cache). + assertNumberOfUsers(1). + assertNumberOfRoles(1). + assertThatGroupHasUsers("crtadmins", "bob-crtadmin") + + inObjectCache(t, ctx.outDir, "member-1", cache). + assertUser("bob-crtadmin"). + hasIdentity("67890"). + belongsToGroups(groups("crtadmins"), extraGroupsUserIsNotPartOf()). + hasRole("toolchain-member-operator", configuration.Member.AsSuffix("restart-deployment"), configuration.Member.AsSuffix("restart-deployment-bob-crtadmin")). + hasNsClusterRole("toolchain-member-operator", "view", configuration.Member.AsSuffix("clusterrole-view-bob-crtadmin")). 
+ hasClusterRoleBinding("cluster-monitoring-view", configuration.Member.AsSuffix("clusterrole-cluster-monitoring-view-bob-crtadmin")) + }) } func newKubeSawAdminsWithDefaultClusters(serviceAccounts []assets.ServiceAccount, users []assets.User) *assets.KubeSawAdmins { diff --git a/pkg/cmd/generate/mock_test.go b/pkg/cmd/generate/mock_test.go index 130c841..33e67fc 100644 --- a/pkg/cmd/generate/mock_test.go +++ b/pkg/cmd/generate/mock_test.go @@ -67,9 +67,21 @@ func newAdminManifestsContext(t *testing.T, config *assets.KubeSawAdmins, files // ClusterContext part -func newFakeClusterContext(adminManifestsContext *adminManifestsContext, clusterType configuration.ClusterType) *clusterContext { - return &clusterContext{ +func newFakeClusterContext(adminManifestsContext *adminManifestsContext, clusterType configuration.ClusterType, options ...fakeClusterContextOption) *clusterContext { + ctx := &clusterContext{ adminManifestsContext: adminManifestsContext, clusterType: clusterType, } + for _, modify := range options { + modify(ctx) + } + return ctx +} + +type fakeClusterContextOption func(ctx *clusterContext) + +func withSpecificKMemberName(specificKMemberName string) fakeClusterContextOption { + return func(ctx *clusterContext) { + ctx.specificKMemberName = specificKMemberName + } } diff --git a/pkg/cmd/generate/util.go b/pkg/cmd/generate/util.go index 7f1a4d4..f19c52f 100644 --- a/pkg/cmd/generate/util.go +++ b/pkg/cmd/generate/util.go @@ -123,6 +123,9 @@ func filePaths(ctx *clusterContext, obj runtimeclient.Object) (string, string, s memberRootDir: ctx.memberRootDir, hostRootDir: ctx.hostRootDir, } + if ctx.specificKMemberName != "" { + storeCtx.memberRootDir = ctx.specificKMemberName + } defaultPath := filePath(rootDir(storeCtx, ctx.clusterType), obj, plural.Resource) theOtherTypePath := filePath(rootDir(storeCtx, ctx.clusterType.TheOtherType()), obj, plural.Resource) diff --git a/pkg/cmd/generate/util_test.go b/pkg/cmd/generate/util_test.go index 4551731..0c1ad46 100644 --- a/pkg/cmd/generate/util_test.go +++ b/pkg/cmd/generate/util_test.go @@ -20,16 +20,24 @@ func TestEnsureObject(t *testing.T) { // given for _, clusterType := range configuration.ClusterTypes { t.Run("for cluster type "+clusterType.String(), func(t *testing.T) { - - t.Run("for User object", func(t *testing.T) { - verifyEnsureManifest(t, clusterType, &userv1.User{}) - }) - - t.Run("for ServiceAccount object", func(t *testing.T) { - verifyEnsureManifest(t, clusterType, &corev1.ServiceAccount{}) - }) + testEnsureObject(t, clusterType, "") }) } + t.Run("when using specificKMemberName", func(t *testing.T) { + testEnsureObject(t, configuration.Member, "member-1") + }) +} + +func testEnsureObject(t *testing.T, clusterType configuration.ClusterType, specificKMemberName string) { + t.Helper() + + t.Run("for User object", func(t *testing.T) { + verifyEnsureManifest(t, clusterType, &userv1.User{}, specificKMemberName) + }) + + t.Run("for ServiceAccount object", func(t *testing.T) { + verifyEnsureManifest(t, clusterType, &corev1.ServiceAccount{}, specificKMemberName) + }) } func prepareObjects(t *testing.T, name string, namespace string, object runtimeclient.Object) (runtimeclient.Object, runtimeclient.Object) { @@ -49,45 +57,50 @@ func prepareObjects(t *testing.T, name string, namespace string, object runtimec return toBeStored, expectedWithTypeMeta } -func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, object runtimeclient.Object) { +func verifyEnsureManifest(t *testing.T, clusterType 
configuration.ClusterType, object runtimeclient.Object, specificKMemberName string) { for _, namespace := range []string{"johnspace", "second-namespace", ""} { t.Run("for namespace "+namespace, func(t *testing.T) { // given ctx := newAdminManifestsContextWithDefaultFiles(t, nil) cache := objectsCache{} toBeStored, expected := prepareObjects(t, "john", namespace, object) + clusterCtx := newFakeClusterContext(ctx, clusterType, withSpecificKMemberName(specificKMemberName)) + rootKDir := clusterType.String() + if specificKMemberName != "" { + rootKDir = specificKMemberName + } // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil) + err := cache.ensureObject(clusterCtx, toBeStored, nil) // then require.NoError(t, err) actual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). + inObjectCache(t, ctx.outDir, rootKDir, cache). assertObject(toBeStored.GetNamespace(), "john", actual, func() { assert.Equal(t, expected, actual) }) - verifyUpdates(t, newFakeClusterContext(ctx, clusterType), cache, object, toBeStored, expected, clusterType.String()) + verifyUpdates(t, clusterCtx, cache, object, toBeStored, expected, rootKDir) t.Run("second resource", func(t *testing.T) { // given toBeStored2, expected2 := prepareObjects(t, "second", namespace, object) // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored2, nil) + err := cache.ensureObject(clusterCtx, toBeStored2, nil) // then require.NoError(t, err) actual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). + inObjectCache(t, ctx.outDir, rootKDir, cache). assertObject(toBeStored.GetNamespace(), "second", actual, func() { assert.Equal(t, expected2, actual) }) t.Run("no change when update function fails", func(t *testing.T) { // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored2, func(object runtimeclient.Object) (bool, error) { + err := cache.ensureObject(clusterCtx, toBeStored2, func(object runtimeclient.Object) (bool, error) { object.SetLabels(map[string]string{"dummy-key": "dummy-value"}) return true, fmt.Errorf("some errror") }) @@ -95,7 +108,7 @@ func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, o // then require.Error(t, err) actual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). + inObjectCache(t, ctx.outDir, rootKDir, cache). 
assertObject(toBeStored.GetNamespace(), "second", actual, func() { assert.Equal(t, expected2, actual) }) @@ -108,7 +121,7 @@ func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, o invalid.SetName("") // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType), invalid.DeepCopyObject().(runtimeclient.Object), nil) + err := cache.ensureObject(clusterCtx, invalid.DeepCopyObject().(runtimeclient.Object), nil) // then require.Error(t, err) @@ -119,7 +132,7 @@ func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, o // given toBeStored, expected := prepareObjects(t, "john", namespace, object) cache := objectsCache{} - require.NoError(t, cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil)) + require.NoError(t, cache.ensureObject(clusterCtx, toBeStored, nil)) // when err := cache.ensureObject(newFakeClusterContext(ctx, clusterType.TheOtherType()), toBeStored, nil) @@ -127,7 +140,7 @@ func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, o // then require.NoError(t, err) actual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). + inObjectCache(t, ctx.outDir, rootKDir, cache). assertObject(toBeStored.GetNamespace(), "john", actual, func() { assert.Equal(t, expected, actual) }) @@ -140,62 +153,65 @@ func verifyEnsureManifest(t *testing.T, clusterType configuration.ClusterType, o assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) }) - t.Run("single-cluster mode enabled", func(t *testing.T) { - // given - ctx := newAdminManifestsContextWithDefaultFiles(t, nil) - ctx.adminManifestsFlags.singleCluster = true - - t.Run("update after move to base", func(t *testing.T) { + if specificKMemberName == "" { + rootKDir := clusterType.String() + t.Run("single-cluster mode enabled", func(t *testing.T) { // given - toBeStored, expected := prepareObjects(t, "john", namespace, object) - cache := objectsCache{} - require.NoError(t, cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil)) - - // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType.TheOtherType()), toBeStored, nil) - - // then - require.NoError(t, err) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). - assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) - inObjectCache(t, ctx.outDir, clusterType.TheOtherType().String(), cache). - assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) - baseActual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, "base", cache). 
- assertObject(toBeStored.GetNamespace(), "john", baseActual, func() { - assert.Equal(t, expected, baseActual) - }) - - verifyUpdates(t, newFakeClusterContext(ctx, clusterType), cache, object, toBeStored, expected, "base") - }) - - t.Run("update while moving to base", func(t *testing.T) { - // given - toBeStored, expected := prepareObjects(t, "john", namespace, object) - modifiedSA := expected.DeepCopyObject().(runtimeclient.Object) - modifiedSA.SetLabels(map[string]string{"dummy-key": "dummy-value"}) - cache := objectsCache{} - require.NoError(t, cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil)) - - // when - err := cache.ensureObject(newFakeClusterContext(ctx, clusterType.TheOtherType()), toBeStored, func(object runtimeclient.Object) (bool, error) { - object.SetLabels(map[string]string{"dummy-key": "dummy-value"}) - return true, nil + ctx := newAdminManifestsContextWithDefaultFiles(t, nil) + ctx.adminManifestsFlags.singleCluster = true + + t.Run("update after move to base", func(t *testing.T) { + // given + toBeStored, expected := prepareObjects(t, "john", namespace, object) + cache := objectsCache{} + require.NoError(t, cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil)) + + // when + err := cache.ensureObject(newFakeClusterContext(ctx, clusterType.TheOtherType()), toBeStored, nil) + + // then + require.NoError(t, err) + inObjectCache(t, ctx.outDir, rootKDir, cache). + assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) + inObjectCache(t, ctx.outDir, clusterType.TheOtherType().String(), cache). + assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) + baseActual := object.DeepCopyObject().(runtimeclient.Object) + inObjectCache(t, ctx.outDir, "base", cache). + assertObject(toBeStored.GetNamespace(), "john", baseActual, func() { + assert.Equal(t, expected, baseActual) + }) + + verifyUpdates(t, newFakeClusterContext(ctx, clusterType), cache, object, toBeStored, expected, "base") }) - // then - require.NoError(t, err) - inObjectCache(t, ctx.outDir, clusterType.String(), cache). - assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) - inObjectCache(t, ctx.outDir, clusterType.TheOtherType().String(), cache). - assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) - baseActual := object.DeepCopyObject().(runtimeclient.Object) - inObjectCache(t, ctx.outDir, "base", cache). - assertObject(toBeStored.GetNamespace(), "john", baseActual, func() { - assert.Equal(t, modifiedSA, baseActual) + t.Run("update while moving to base", func(t *testing.T) { + // given + toBeStored, expected := prepareObjects(t, "john", namespace, object) + modifiedSA := expected.DeepCopyObject().(runtimeclient.Object) + modifiedSA.SetLabels(map[string]string{"dummy-key": "dummy-value"}) + cache := objectsCache{} + require.NoError(t, cache.ensureObject(newFakeClusterContext(ctx, clusterType), toBeStored, nil)) + + // when + err := cache.ensureObject(newFakeClusterContext(ctx, clusterType.TheOtherType()), toBeStored, func(object runtimeclient.Object) (bool, error) { + object.SetLabels(map[string]string{"dummy-key": "dummy-value"}) + return true, nil }) + + // then + require.NoError(t, err) + inObjectCache(t, ctx.outDir, rootKDir, cache). + assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) + inObjectCache(t, ctx.outDir, clusterType.TheOtherType().String(), cache). 
+ assertObjectDoesNotExist(toBeStored.GetNamespace(), "john", object) + baseActual := object.DeepCopyObject().(runtimeclient.Object) + inObjectCache(t, ctx.outDir, "base", cache). + assertObject(toBeStored.GetNamespace(), "john", baseActual, func() { + assert.Equal(t, modifiedSA, baseActual) + }) + }) }) - }) + } }) }) } diff --git a/pkg/test/environment_config.go b/pkg/test/environment_config.go index a4cf7a2..03ec13f 100644 --- a/pkg/test/environment_config.go +++ b/pkg/test/environment_config.go @@ -24,15 +24,27 @@ func Clusters(hostURL string) ClustersCreator { } } -func (m ClustersCreator) AddMember(name, URL string) ClustersCreator { +func (m ClustersCreator) AddMember(name, URL string, options ...MemberClusterOption) ClustersCreator { + memberCluster := assets.MemberCluster{ + Name: name, + ClusterConfig: assets.ClusterConfig{ + API: URL, + }, + } + for _, modify := range options { + modify(&memberCluster) + } return func(clusters *assets.Clusters) { m(clusters) - clusters.Members = append(clusters.Members, assets.MemberCluster{ - Name: name, - ClusterConfig: assets.ClusterConfig{ - API: URL, - }, - }) + clusters.Members = append(clusters.Members, memberCluster) + } +} + +type MemberClusterOption func(*assets.MemberCluster) + +func WithSeparateKustomizeComponent() MemberClusterOption { + return func(memberCluster *assets.MemberCluster) { + memberCluster.SeparateKustomizeComponent = true } } @@ -57,6 +69,14 @@ func Sa(baseName, namespace string, permissions ...PermissionsPerClusterTypeModi } } +func (c ServiceAccountCreator) WithSkippedMembers(members ...string) ServiceAccountCreator { + return func() assets.ServiceAccount { + serviceAccount := c() + serviceAccount.Selector.SkipMembers = members + return serviceAccount + } +} + func NewPermissionsPerClusterType(permissions ...PermissionsPerClusterTypeModifier) assets.PermissionsPerClusterType { perm := map[string]assets.PermissionBindings{} for _, addPermissions := range permissions { @@ -151,3 +171,11 @@ func User(name string, IDs []string, allCluster bool, group string, permissions return user } } + +func (c UserCreator) WithSkippedMembers(members ...string) UserCreator { + return func() assets.User { + user := c() + user.Selector.SkipMembers = members + return user + } +} diff --git a/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml b/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml index dddcb2d..e607687 100644 --- a/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml +++ b/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml @@ -8,6 +8,7 @@ clusters: name: member-2 - api: https://api.dummy-m3.openshiftapps.com:6443 name: member-3 + separateKustomizeComponent: true serviceAccounts: @@ -215,6 +216,21 @@ users: clusterRoles: - view +- name: editor-not-included-in-member-3 + id: + - 5412345 + selector: + skipMembers: + - member-3 + member: + roleBindings: + - namespace: first-component + clusterRoles: + - edit + - namespace: second-component + clusterRoles: + - edit + - name: my-clusteradmin id: - 1234567890
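
Reviewer note: below is a minimal kubesaw-admins.yaml sketch illustrating the two fields introduced by this patch, separateKustomizeComponent on a member cluster and selector.skipMembers on a ServiceAccount or User. All cluster, namespace, and account names are hypothetical placeholders, and the clusters.host block is assumed to use the same api field as the member entries; the field names themselves come from the yaml tags in pkg/assets/sandbox_config.go and the test-resources file touched by this patch.

# hypothetical example configuration; names are placeholders, not a real deployment
clusters:
  host:
    api: https://api.example-host.openshiftapps.com:6443
  members:
  - name: member-1
    api: https://api.example-m1.openshiftapps.com:6443
  - name: member-2
    api: https://api.example-m2.openshiftapps.com:6443
    # generate all manifests for member-2 in a separate Kustomize component,
    # rooted in a directory named after the member cluster ("member-2")
    separateKustomizeComponent: true

serviceAccounts:
- name: example-admin
  selector:
    # skip generating this ServiceAccount and its RoleBindings for member-2
    skipMembers:
    - member-2
  member:
    roleBindings:
    - namespace: example-namespace
      clusterRoles:
      - view

users:
- name: example-user
  id:
  - 12345
  groups:
  - example-group
  selector:
    # skip generating this User, its Identity, and its RoleBindings for member-2
    skipMembers:
    - member-2
  member:
    roleBindings:
    - namespace: example-namespace
      clusterRoles:
      - edit

With a configuration like this, ksctl generate admin-manifests should produce a third root directory named after the member cluster (here member-2) next to the default host and member directories, containing the full Kustomize component for that member, while the entities that list the member in skipMembers are omitted from it. As enforced by this patch, such a configuration cannot be combined with the --single-cluster flag.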