add new separateKustomizeComponent and skipMembers fields (#67)
MatousJobanek authored Aug 27, 2024
1 parent ed47159 commit 45d7ce4
Showing 11 changed files with 362 additions and 125 deletions.
16 changes: 14 additions & 2 deletions pkg/assets/sandbox_config.go
@@ -14,23 +14,35 @@ type Clusters struct {
type MemberCluster struct {
Name string `yaml:"name"`
ClusterConfig `yaml:",inline"`
+ // SeparateKustomizeComponent, when set to true, causes the manifests for the member cluster to be generated
+ // in a separate Kustomize component (a directory structure containing all the generated manifests, including
+ // the kustomization.yaml files). The root folder will have the same name as the member cluster.
+ SeparateKustomizeComponent bool `yaml:"separateKustomizeComponent"`
}

type ClusterConfig struct {
API string `yaml:"api"`
}

type ServiceAccount struct {
- Name string `yaml:"name"`
- Namespace string `yaml:"namespace,omitempty"`
+ Name      string   `yaml:"name"`
+ Namespace string   `yaml:"namespace,omitempty"`
+ Selector  Selector `yaml:"selector"`
PermissionsPerClusterType `yaml:",inline"`
}

+ // Selector contains fields for selecting the clusters the entity should (or should not) be applied to
+ type Selector struct {
+ // SkipMembers can contain a list of member cluster names the entity shouldn't be applied to
+ SkipMembers []string `yaml:"skipMembers,omitempty"`
+ }

type User struct {
Name string `yaml:"name"`
ID []string `yaml:"id"`
AllClusters bool `yaml:"allClusters,omitempty"` // force user and identity manifest generation on all clusters, even if no permissions are specified
Groups []string `yaml:"groups"`
+ Selector Selector `yaml:"selector"`
PermissionsPerClusterType `yaml:",inline,omitempty"`
}

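For orientation, both new fields are consumed from kubesaw-admins.yaml. Below is a minimal, self-contained sketch of how such a fragment decodes — the cluster names, API URLs, and top-level keys are illustrative only, the structs are trimmed copies of the ones above, and gopkg.in/yaml.v2 is assumed as the YAML library:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed copies of the structs above, limited to the fields shown in this diff.
// ClusterConfig's inlined "api" field is flattened directly into MemberCluster here.
type MemberCluster struct {
	Name                       string `yaml:"name"`
	API                        string `yaml:"api"`
	SeparateKustomizeComponent bool   `yaml:"separateKustomizeComponent"`
}

type Selector struct {
	SkipMembers []string `yaml:"skipMembers,omitempty"`
}

type ServiceAccount struct {
	Name     string   `yaml:"name"`
	Selector Selector `yaml:"selector"`
}

type config struct {
	Members         []MemberCluster  `yaml:"members"`
	ServiceAccounts []ServiceAccount `yaml:"serviceAccounts"`
}

// A hypothetical kubesaw-admins.yaml fragment: member-2 gets its own Kustomize
// component, and the "john" SA is skipped when that component is generated.
const sample = `
members:
- name: member-1
  api: https://api.member-1.example.com:6443
- name: member-2
  api: https://api.member-2.example.com:6443
  separateKustomizeComponent: true
serviceAccounts:
- name: john
  selector:
    skipMembers:
    - member-2
`

func main() {
	var c config
	if err := yaml.Unmarshal([]byte(sample), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}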
31 changes: 26 additions & 5 deletions pkg/cmd/generate/admin-manifests.go
@@ -1,6 +1,7 @@
package generate

import (
"fmt"
"os"
"path/filepath"

@@ -35,7 +36,7 @@ ksctl generate admin-manifests ./path/to/kubesaw-stage.openshiftapps.com/kubesaw
}
command.Flags().StringVarP(&f.kubeSawAdminsFile, "kubesaw-admins", "c", "", "Use the given kubesaw-admin file")
command.Flags().StringVarP(&f.outDir, "out-dir", "o", "", "Directory where generated manifests should be stored")
- command.Flags().BoolVarP(&f.singleCluster, "single-cluster", "s", false, "If host and member are deployed to the same cluster")
+ command.Flags().BoolVarP(&f.singleCluster, "single-cluster", "s", false, "If host and member are deployed to the same cluster. Cannot be used with separateKustomizeComponent set in one of the members.")
command.Flags().StringVar(&f.hostRootDir, "host-root-dir", "host", "The root directory name for host manifests")
command.Flags().StringVar(&f.memberRootDir, "member-root-dir", "member", "The root directory name for member manifests")

@@ -60,6 +61,13 @@ func adminManifests(term ioutils.Terminal, files assets.FS, flags adminManifests
if err != nil {
return errs.Wrapf(err, "unable to get kubesaw-admins.yaml file from %s", flags.kubeSawAdminsFile)
}
+ if flags.singleCluster {
+ for _, memberCluster := range kubeSawAdmins.Clusters.Members {
+ if memberCluster.SeparateKustomizeComponent {
+ return fmt.Errorf("--single-cluster flag cannot be used with separateKustomizeComponent set in one of the members (%s)", memberCluster.Name)
+ }
+ }
+ }
err = os.RemoveAll(flags.outDir)
if err != nil {
return err
@@ -71,12 +79,20 @@ func adminManifests(term ioutils.Terminal, files assets.FS, flags adminManifests
files: files,
}
objsCache := objectsCache{}
- if err := ensureCluster(ctx, configuration.Host, objsCache); err != nil {
+ if err := ensureCluster(ctx, configuration.Host, objsCache, ""); err != nil {
return err
}
- if err := ensureCluster(ctx, configuration.Member, objsCache); err != nil {
+ if err := ensureCluster(ctx, configuration.Member, objsCache, ""); err != nil {
return err
}

+ for _, memberCluster := range kubeSawAdmins.Clusters.Members {
+ if memberCluster.SeparateKustomizeComponent {
+ if err := ensureCluster(ctx, configuration.Member, objsCache, memberCluster.Name); err != nil {
+ return err
+ }
+ }
+ }
return objsCache.writeManifests(ctx)
}

@@ -87,12 +103,17 @@ type adminManifestsContext struct {
files assets.FS
}

- func ensureCluster(ctx *adminManifestsContext, clusterType configuration.ClusterType, cache objectsCache) error {
- ctx.PrintContextSeparatorf("Generating manifests for %s cluster type", clusterType)
+ func ensureCluster(ctx *adminManifestsContext, clusterType configuration.ClusterType, cache objectsCache, specificKMemberName string) error {
+ if specificKMemberName == "" {
+ ctx.PrintContextSeparatorf("Generating manifests for %s cluster type", clusterType)
+ } else {
+ ctx.PrintContextSeparatorf("Generating manifests for %s cluster type in the separate Kustomize component: %s", clusterType, specificKMemberName)
+ }

clusterCtx := &clusterContext{
adminManifestsContext: ctx,
clusterType: clusterType,
+ specificKMemberName: specificKMemberName,
}

if err := ensureServiceAccounts(clusterCtx, cache); err != nil {
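The flow above generates the default host and member components first (with an empty specificKMemberName), then makes one extra ensureCluster pass per member that sets separateKustomizeComponent, so the output directory gains one extra root folder named after each such member. A sketch of asserting that layout, in the spirit of the verifyFiles helper in the tests below — verifyLayout is hypothetical, and the directory names assume the default --host-root-dir/--member-root-dir values plus a hypothetical member-2 cluster:

package generate

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// verifyLayout checks that, with member-2 flagged via separateKustomizeComponent,
// the out dir holds three root folders.
func verifyLayout(t *testing.T, outDir string) {
	entries, err := os.ReadDir(outDir)
	require.NoError(t, err)

	names := make([]string, 0, len(entries))
	for _, e := range entries {
		names = append(names, e.Name())
	}
	// "host" and "member" are the default root dirs; "member-2" is named
	// after the member cluster that set separateKustomizeComponent: true.
	assert.ElementsMatch(t, []string{"host", "member", "member-2"}, names)
}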
111 changes: 73 additions & 38 deletions pkg/cmd/generate/admin-manifests_test.go
@@ -26,18 +26,20 @@ func TestAdminManifests(t *testing.T) {
kubeSawAdmins := NewKubeSawAdmins(
Clusters(HostServerAPI).
AddMember("member1", Member1ServerAPI).
AddMember("member2", Member2ServerAPI),
AddMember("member2", Member2ServerAPI, WithSeparateKustomizeComponent()),
ServiceAccounts(
Sa("john", "",
HostRoleBindings("toolchain-host-operator", Role("install-operator"), ClusterRole("admin")),
MemberRoleBindings("toolchain-member-operator", Role("install-operator"), ClusterRole("admin"))),
MemberRoleBindings("toolchain-member-operator", Role("install-operator"), ClusterRole("admin"))).
WithSkippedMembers("member2"),
Sa("bob", "",
HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("edit")),
MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("edit")))),
Users(
User("john-user", []string{"12345"}, false, "crtadmins-view",
HostRoleBindings("toolchain-host-operator", Role("register-cluster"), ClusterRole("edit")),
MemberRoleBindings("toolchain-member-operator", Role("register-cluster"), ClusterRole("edit"))),
MemberRoleBindings("toolchain-member-operator", Role("register-cluster"), ClusterRole("edit"))).
WithSkippedMembers("member2"),
User("bob-crtadmin", []string{"67890"}, false, "crtadmins-exec",
HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("admin")),
MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("admin")))))
@@ -65,19 +67,43 @@ })
})

t.Run("in single-cluster mode", func(t *testing.T) {
- // given
- outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-")
- require.NoError(t, err)
- term := NewFakeTerminalWithResponse("Y")
- term.Tee(os.Stdout)
- flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster())
-
- // when
- err = adminManifests(term, files, flags)
-
- // then
- require.NoError(t, err)
- verifyFiles(t, flags)
t.Run("fails with separateKustomizeComponent set for member2", func(t *testing.T) {
// given
outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-")
require.NoError(t, err)
term := NewFakeTerminalWithResponse("Y")
term.Tee(os.Stdout)
flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster())

// when
err = adminManifests(term, files, flags)

// then
require.EqualError(t, err, "--single-cluster flag cannot be used with separateKustomizeComponent set in one of the members (member2)")
})

t.Run("without separateKustomizeComponent set for member2", func(t *testing.T) {
// given
kubeSawAdmins.Clusters.Members[1].SeparateKustomizeComponent = false
kubeSawAdminsContent, err := yaml.Marshal(kubeSawAdmins)
require.NoError(t, err)

configFile := createKubeSawAdminsFile(t, "kubesaw.host.openshiftapps.com", kubeSawAdminsContent)
files := newDefaultFiles(t)

outTempDir, err := os.MkdirTemp("", "admin-manifests-cli-test-")
require.NoError(t, err)
term := NewFakeTerminalWithResponse("Y")
term.Tee(os.Stdout)
flags := newAdminManifestsFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster())

// when
err = adminManifests(term, files, flags)

// then
require.NoError(t, err)
verifyFiles(t, flags)
})
})

t.Run("in custom host root directory", func(t *testing.T) {
@@ -183,14 +209,8 @@ func storeDummySA(t *testing.T, outDir string) {
func verifyFiles(t *testing.T, flags adminManifestsFlags) {
dirEntries, err := os.ReadDir(flags.outDir)
require.NoError(t, err)
- var dirNames []string
- if !flags.singleCluster {
- assert.Len(t, dirEntries, 2)
- dirNames = []string{dirEntries[0].Name(), dirEntries[1].Name()}
- } else {
- assert.Len(t, dirEntries, 3)
- dirNames = []string{dirEntries[0].Name(), dirEntries[1].Name(), dirEntries[2].Name()}
- }
+ assert.Len(t, dirEntries, 3)
+ dirNames := []string{dirEntries[0].Name(), dirEntries[1].Name(), dirEntries[2].Name()}

for _, clusterType := range configuration.ClusterTypes {
ns := commontest.HostOperatorNs
@@ -203,16 +223,24 @@ func verifyFiles(t *testing.T, flags adminManifestsFlags) {
verifyServiceAccounts(t, flags.outDir, expectedRootDir, clusterType, ns)
verifyUsers(t, flags.outDir, expectedRootDir, clusterType, ns, flags.singleCluster)
}

+ if !flags.singleCluster {
+ // if singleCluster is not used, verify that member2 was generated in a separate Kustomize component
+ verifyServiceAccounts(t, flags.outDir, "member2", configuration.Member, commontest.MemberOperatorNs)
+ verifyUsers(t, flags.outDir, "member2", configuration.Member, commontest.MemberOperatorNs, flags.singleCluster)
+ }
}

func verifyServiceAccounts(t *testing.T, outDir, expectedRootDir string, clusterType configuration.ClusterType, roleNs string) {
saNs := fmt.Sprintf("sandbox-sre-%s", clusterType)

- inKStructure(t, outDir, expectedRootDir).
- assertSa(saNs, "john").
- hasRole(roleNs, clusterType.AsSuffix("install-operator"), clusterType.AsSuffix("install-operator-john")).
- hasNsClusterRole(roleNs, "admin", clusterType.AsSuffix("clusterrole-admin-john"))

if expectedRootDir != "member2" {
// john is skipped for member2 (when generated as a separate kustomize component)
inKStructure(t, outDir, expectedRootDir).
assertSa(saNs, "john").
hasRole(roleNs, clusterType.AsSuffix("install-operator"), clusterType.AsSuffix("install-operator-john")).
hasNsClusterRole(roleNs, "admin", clusterType.AsSuffix("clusterrole-admin-john"))
}
inKStructure(t, outDir, expectedRootDir).
assertSa(saNs, "bob").
hasRole(roleNs, clusterType.AsSuffix("restart-deployment"), clusterType.AsSuffix("restart-deployment-bob")).
@@ -225,20 +253,27 @@ func verifyUsers(t *testing.T, outDir, expectedRootDir string, clusterType confi
rootDir = "base"
}

- inKStructure(t, outDir, rootDir).
- assertUser("john-user").
- hasIdentity("12345").
- belongsToGroups(groups("crtadmins-view"), extraGroupsUserIsNotPartOf("crtadmins-exec"))

storageAssertion := inKStructure(t, outDir, expectedRootDir).storageAssertionImpl
- newPermissionAssertion(storageAssertion, "", "john-user", "User").
- hasRole(ns, clusterType.AsSuffix("register-cluster"), clusterType.AsSuffix("register-cluster-john-user")).
- hasNsClusterRole(ns, "edit", clusterType.AsSuffix("clusterrole-edit-john-user"))
+ bobsExtraGroupsUserIsNotPartOf := extraGroupsUserIsNotPartOf()
+ if expectedRootDir != "member2" {
+ // john is skipped for member2 (when generated as a separate kustomize component)
+ inKStructure(t, outDir, rootDir).
+ assertUser("john-user").
+ hasIdentity("12345").
+ belongsToGroups(groups("crtadmins-view"), extraGroupsUserIsNotPartOf("crtadmins-exec"))
+
+ newPermissionAssertion(storageAssertion, "", "john-user", "User").
+ hasRole(ns, clusterType.AsSuffix("register-cluster"), clusterType.AsSuffix("register-cluster-john-user")).
+ hasNsClusterRole(ns, "edit", clusterType.AsSuffix("clusterrole-edit-john-user"))
+
+ // crtadmins-view group is not generated for member2 at all
+ bobsExtraGroupsUserIsNotPartOf = extraGroupsUserIsNotPartOf("crtadmins-view")
+ }

inKStructure(t, outDir, rootDir).
assertUser("bob-crtadmin").
hasIdentity("67890").
belongsToGroups(groups("crtadmins-exec"), extraGroupsUserIsNotPartOf("crtadmins-view"))
belongsToGroups(groups("crtadmins-exec"), bobsExtraGroupsUserIsNotPartOf)

newPermissionAssertion(storageAssertion, "", "bob-crtadmin", "User").
hasRole(ns, clusterType.AsSuffix("restart-deployment"), clusterType.AsSuffix("restart-deployment-bob-crtadmin")).
14 changes: 14 additions & 0 deletions pkg/cmd/generate/assertion_test.go
@@ -443,3 +443,17 @@ func (a *objectsCacheAssertion) assertNumberOfRoles(expectedNumber int) *objects
assert.Len(a.t, roles, expectedNumber)
return a
}

+ func (a *objectsCacheAssertion) assertNumberOfSAs(expectedNumber int) *objectsCacheAssertion {
+ sas, err := a.listObjects("serviceaccounts", "ServiceAccount", &corev1.ServiceAccount{})
+ require.NoError(a.t, err)
+ assert.Len(a.t, sas, expectedNumber)
+ return a
+ }
+
+ func (a *objectsCacheAssertion) assertNumberOfUsers(expectedNumber int) *objectsCacheAssertion {
+ users, err := a.listObjects("users", "User", &userv1.User{})
+ require.NoError(a.t, err)
+ assert.Len(a.t, users, expectedNumber)
+ return a
+ }
10 changes: 9 additions & 1 deletion pkg/cmd/generate/cluster.go
@@ -2,18 +2,23 @@ package generate

import (
"github.com/kubesaw/ksctl/pkg/configuration"
"k8s.io/utils/strings/slices"
)

type clusterContext struct {
*adminManifestsContext
- clusterType configuration.ClusterType
+ clusterType         configuration.ClusterType
+ specificKMemberName string
}

// ensureServiceAccounts reads the list of service account definitions and their permissions.
// It generates the SAs and the roles & role bindings for them.
func ensureServiceAccounts(ctx *clusterContext, objsCache objectsCache) error {
ctx.Printlnf("-> Ensuring ServiceAccounts and their RoleBindings...")
for _, sa := range ctx.kubeSawAdmins.ServiceAccounts {
+ if ctx.specificKMemberName != "" && slices.Contains(sa.Selector.SkipMembers, ctx.specificKMemberName) {
+ continue
+ }

// By default, the sandbox SRE namespace should be used. Keep this empty (when the target namespace is not defined) so that the ensureServiceAccount method can derive it from the cluster type it is being applied in.
saNamespace := ""
@@ -42,6 +47,9 @@ func ensureUsers(ctx *clusterContext, objsCache objectsCache) error {
ctx.Printlnf("-> Ensuring Users and its RoleBindings...")

for _, user := range ctx.kubeSawAdmins.Users {
+ if ctx.specificKMemberName != "" && slices.Contains(user.Selector.SkipMembers, ctx.specificKMemberName) {
+ continue
+ }
m := &permissionsManager{
objectsCache: objsCache,
createSubject: ensureUserIdentityAndGroups(user.ID, user.Groups),
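Both loops above apply the same guard. As a sketch, the rule could be factored into a helper — hypothetical, not part of this commit — which makes the semantics explicit: skipMembers only filters entities out of the per-member Kustomize components, while the default host/member components (generated with an empty specificKMemberName) are never filtered:

package generate

import "k8s.io/utils/strings/slices"

// skipForMember reports whether an entity with the given skipMembers list
// should be omitted from the component currently being generated. A
// hypothetical refactoring of the guard used in ensureServiceAccounts and
// ensureUsers above.
func skipForMember(skipMembers []string, specificKMemberName string) bool {
	// An empty specificKMemberName means the default host/member component
	// is being generated; skipMembers never applies there.
	return specificKMemberName != "" && slices.Contains(skipMembers, specificKMemberName)
}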