From ed47159dfc3e9e6ea1fd2170b03539657e521e8f Mon Sep 17 00:00:00 2001 From: Francisc Munteanu Date: Tue, 27 Aug 2024 10:28:09 +0200 Subject: [PATCH] feat: move add-cluster.sh script logic into register-member command (#53) * replace add cluster script --------- Co-authored-by: Matous Jobanek --- pkg/cmd/adm/register_member.go | 398 +++++++++++++-------- pkg/cmd/adm/register_member_test.go | 507 +++++++++------------------ pkg/cmd/generate/cli_configs.go | 6 +- pkg/cmd/generate/cli_configs_test.go | 2 +- pkg/utils/util.go | 16 +- 5 files changed, 445 insertions(+), 484 deletions(-) diff --git a/pkg/cmd/adm/register_member.go b/pkg/cmd/adm/register_member.go index 3df77f7..38afb79 100644 --- a/pkg/cmd/adm/register_member.go +++ b/pkg/cmd/adm/register_member.go @@ -4,10 +4,6 @@ import ( "context" "errors" "fmt" - "io" - "net/http" - "os" - "os/exec" "strings" "time" @@ -16,24 +12,28 @@ import ( "github.com/codeready-toolchain/toolchain-common/pkg/condition" "github.com/kubesaw/ksctl/pkg/client" "github.com/kubesaw/ksctl/pkg/cmd/flags" + "github.com/kubesaw/ksctl/pkg/cmd/generate" "github.com/kubesaw/ksctl/pkg/configuration" clicontext "github.com/kubesaw/ksctl/pkg/context" "github.com/kubesaw/ksctl/pkg/ioutils" "github.com/kubesaw/ksctl/pkg/utils" - errs "github.com/pkg/errors" "github.com/spf13/cobra" + authv1 "k8s.io/api/authentication/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - AddClusterScriptDomain = "https://raw.githubusercontent.com/" - AddClusterScriptPath = "codeready-toolchain/toolchain-cicd/master/scripts/add-cluster.sh" - AddClusterScriptURL = AddClusterScriptDomain + AddClusterScriptPath + TokenExpirationDays = 3650 ) // newClientFromRestConfigFunc is a function to create a new Kubernetes client using the provided @@ -54,12 +54,13 @@ func newExtendedCommandContext(term ioutils.Terminal, clientCtor newClientFromRe } type registerMemberArgs struct { - hostKubeConfig string - memberKubeConfig string - hostNamespace string - memberNamespace string - nameSuffix string - useLetsEncrypt bool + hostKubeConfig string + memberKubeConfig string + hostNamespace string + memberNamespace string + nameSuffix string + useLetsEncrypt bool + waitForReadyTimeout time.Duration } func NewRegisterMemberCmd() *cobra.Command { @@ -71,15 +72,12 @@ func NewRegisterMemberCmd() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := newExtendedCommandContext(term, client.DefaultNewClientFromRestConfig) - newCommand := func(name string, args ...string) *exec.Cmd { - return exec.Command(name, args...) - } - return registerMemberCluster(ctx, newCommand, 5*time.Minute, commandArgs) + return registerMemberCluster(ctx, commandArgs) }, } // keep these values in sync with the values in defaultRegisterMemberArgs() function in the tests. 
- defaultLetsEncrypt := true + defaultTimeout := 2 * time.Minute defaultNameSuffix := "" defaultHostNs := "toolchain-host-operator" defaultMemberNs := "toolchain-member-operator" @@ -88,20 +86,16 @@ func NewRegisterMemberCmd() *cobra.Command { flags.MustMarkRequired(cmd, "host-kubeconfig") cmd.Flags().StringVar(&commandArgs.memberKubeConfig, "member-kubeconfig", "", "Path to the kubeconfig file of the member cluster") flags.MustMarkRequired(cmd, "member-kubeconfig") - cmd.Flags().BoolVar(&commandArgs.useLetsEncrypt, "lets-encrypt", defaultLetsEncrypt, fmt.Sprintf("Whether to use Let's Encrypt certificates or rely on the cluster certs (default: %t)", defaultLetsEncrypt)) - cmd.Flags().StringVar(&commandArgs.nameSuffix, "name-suffix", defaultNameSuffix, fmt.Sprintf("The suffix to append to the member name used when there are multiple members in a single cluster (default: '%s')", defaultNameSuffix)) - cmd.Flags().StringVar(&commandArgs.hostNamespace, "host-ns", defaultHostNs, fmt.Sprintf("The namespace of the host operator in the host cluster (default: '%s')", defaultHostNs)) - cmd.Flags().StringVar(&commandArgs.memberNamespace, "member-ns", defaultMemberNs, fmt.Sprintf("The namespace of the member operator in the member cluster (default: '%s')", defaultMemberNs)) + cmd.Flags().BoolVar(&commandArgs.useLetsEncrypt, "lets-encrypt", true, "Whether to use Let's Encrypt certificates or rely on the cluster certs.") + cmd.Flags().StringVar(&commandArgs.nameSuffix, "name-suffix", defaultNameSuffix, "The suffix to append to the member name used when there are multiple members in a single cluster.") + cmd.Flags().StringVar(&commandArgs.hostNamespace, "host-ns", defaultHostNs, "The namespace of the host operator in the host cluster.") + cmd.Flags().StringVar(&commandArgs.memberNamespace, "member-ns", defaultMemberNs, "The namespace of the member operator in the member cluster.") + cmd.Flags().DurationVar(&commandArgs.waitForReadyTimeout, "timeout", defaultTimeout, "The max timeout used when waiting for each of the computations to be completed.") return cmd } -func registerMemberCluster(ctx *extendedCommandContext, newCommand client.CommandCreator, waitForReadyTimeout time.Duration, args registerMemberArgs) error { - data, err := dataFromArgs(ctx, args, waitForReadyTimeout) - if err != nil { - return err - } - - validated, err := data.validate(ctx) +func registerMemberCluster(ctx *extendedCommandContext, args registerMemberArgs) error { + validated, err := validateArgs(ctx, args) if err != nil { return err } @@ -110,7 +104,7 @@ func registerMemberCluster(ctx *extendedCommandContext, newCommand client.Comman sb := strings.Builder{} sb.WriteString("Cannot proceed because of the following problems:") for _, e := range validated.errors { - sb.WriteString("\n- ") + sb.WriteString("\n\t- ") sb.WriteString(e) } return errors.New(sb.String()) @@ -120,59 +114,197 @@ func registerMemberCluster(ctx *extendedCommandContext, newCommand client.Comman return nil } - return validated.perform(ctx, newCommand) + return validated.perform(ctx) +} + +func (v *registerMemberValidated) getSourceAndTargetClusters(sourceClusterType configuration.ClusterType) (clusterData, clusterData) { + if sourceClusterType == configuration.Member { + return v.memberClusterData, v.hostClusterData + } + return v.hostClusterData, v.memberClusterData } -func runAddClusterScript(term ioutils.Terminal, newCommand client.CommandCreator, joiningClusterType configuration.ClusterType, hostKubeconfig, hostNs, memberKubeconfig, memberNs, nameSuffix 
string, useLetsEncrypt bool) error { - if !term.AskForConfirmation(ioutils.WithMessagef("register the %s cluster by creating a ToolchainCluster CR, a Secret and a new ServiceAccount resource?", joiningClusterType)) { +// addCluster creates a secret and a ToolchainCluster resource on the `targetCluster`. +// This ToolchainCluster CR stores a reference to the secret which contains the kubeconfig of the `sourceCluster`. Thus enables the `targetCluster` to interact with the `sourceCluster`. +// - `targetCluster` is the cluster where we create the ToolchainCluster resource and the secret +// - `sourceCluster` is the cluster referenced in the kubeconfig/ToolchainCluster of the `targetCluster` +func (v *registerMemberValidated) addCluster(ctx *extendedCommandContext, sourceClusterType configuration.ClusterType) error { + ctx.PrintContextSeparatorf("Ensuring connection from the %s cluster to the %s via a ToolchainCluster CR, a Secret, and a new ServiceAccount resource", sourceClusterType, sourceClusterType.TheOtherType()) + sourceClusterDetails, targetClusterDetails := v.getSourceAndTargetClusters(sourceClusterType) + // wait for the SA to be ready + toolchainClusterSAKey := runtimeclient.ObjectKey{ + Name: fmt.Sprintf("toolchaincluster-%s", sourceClusterType), + Namespace: sourceClusterDetails.namespace, + } + if err := waitForToolchainClusterSA(ctx.CommandContext, sourceClusterDetails.client, toolchainClusterSAKey, v.args.waitForReadyTimeout); err != nil { + ctx.Printlnf("The %s ServiceAccount is not present in the %s cluster.", toolchainClusterSAKey, sourceClusterType) + ctx.Printlnf("Please check the %[1]s ToolchainCluster ServiceAccount in the %[2]s %[3]s cluster or the deployment of the %[3]s operator.", toolchainClusterSAKey, sourceClusterDetails.apiEndpoint, sourceClusterType) + return err + } + // source cluster details + ctx.Printlnf("The source cluster name: %s", sourceClusterDetails.toolchainClusterName) + ctx.Printlnf("The API endpoint of the source cluster: %s", sourceClusterDetails.apiEndpoint) + + // target to details + ctx.Printlnf("The name of the target cluster: %s", targetClusterDetails.toolchainClusterName) + ctx.Printlnf("The API endpoint of the target cluster: %s", targetClusterDetails.apiEndpoint) + + // generate a token that will be used for the kubeconfig + sourceTargetRestClient, err := newRestClient(sourceClusterDetails.kubeConfig) + if err != nil { + return err + } + token, err := generate.GetServiceAccountToken(sourceTargetRestClient, toolchainClusterSAKey, TokenExpirationDays) + if err != nil { + return err + } + // TODO drop this part together with the --lets-encrypt flag and start loading certificate from the kubeconfig as soon as ToolchainCluster controller supports loading certificates from kubeconfig + var insecureSkipTLSVerify bool + if v.args.useLetsEncrypt { + ctx.Printlnf("using let's encrypt certificate") + insecureSkipTLSVerify = false + } else { + ctx.Printlnf("setting insecure skip tls verification flags") + insecureSkipTLSVerify = true + } + // generate the kubeconfig that can be used by target cluster to interact with the source cluster + generatedKubeConfig := generateKubeConfig(token, sourceClusterDetails.apiEndpoint, sourceClusterDetails.namespace, insecureSkipTLSVerify) + generatedKubeConfigFormatted, err := clientcmd.Write(*generatedKubeConfig) + if err != nil { + return err + } + + // Create or Update the secret on the targetCluster + secretName := toolchainClusterSAKey.Name + "-" + sourceClusterDetails.toolchainClusterName + 
ctx.Printlnf("creating secret %s/%s in the %s cluster", targetClusterDetails.namespace, secretName, sourceClusterType.TheOtherType()) + kubeConfigSecret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: targetClusterDetails.namespace}} + _, err = controllerutil.CreateOrUpdate(context.TODO(), targetClusterDetails.client, kubeConfigSecret, func() error { + + // update the secret label + labels := kubeConfigSecret.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels[toolchainv1alpha1.ToolchainClusterLabel] = sourceClusterDetails.toolchainClusterName + kubeConfigSecret.Labels = labels + + // update the kubeconfig data + kubeConfigSecret.StringData = map[string]string{ + "kubeconfig": string(generatedKubeConfigFormatted), + "token": token, + } + return nil + }) + + if err != nil { + return err } + ctx.Println("Secret successfully reconciled") + + // TODO -- temporary logic + // The creation of the toolchaincluster is just temporary until we implement https://issues.redhat.com/browse/KUBESAW-44, + // the creation logic will be moved to the toolchaincluster_resource controller in toolchain-common and will be based on the secret created above. + // + // create/update toolchaincluster on the targetCluster + ctx.Printlnf("creating ToolchainCluster representation of %s in %s:", sourceClusterType, targetClusterDetails.toolchainClusterName) + toolchainClusterCR := &toolchainv1alpha1.ToolchainCluster{ObjectMeta: metav1.ObjectMeta{Name: sourceClusterDetails.toolchainClusterName, Namespace: targetClusterDetails.namespace}} + _, err = controllerutil.CreateOrUpdate(context.TODO(), targetClusterDetails.client, toolchainClusterCR, func() error { + + // update the tc label + labels := toolchainClusterCR.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + // TODO drop this "namespace" label as soon as ToolchainCluster controller supports loading data from kubeconfig + labels["namespace"] = sourceClusterDetails.namespace + toolchainClusterCR.Labels = labels + toolchainClusterCR.Spec.APIEndpoint = sourceClusterDetails.apiEndpoint + toolchainClusterCR.Spec.SecretRef.Name = secretName + if insecureSkipTLSVerify { + toolchainClusterCR.Spec.DisabledTLSValidations = []toolchainv1alpha1.TLSValidation{toolchainv1alpha1.TLSAll} + } + + return nil + }) - script, err := downloadScript(term) if err != nil { return err } - args := []string{script.Name(), "--type", joiningClusterType.String(), "--host-kubeconfig", hostKubeconfig, "--host-ns", hostNs, "--member-kubeconfig", memberKubeconfig, "--member-ns", memberNs} - if len(nameSuffix) > 0 { - args = append(args, "--multi-member", nameSuffix) + ctx.Println("Toolchaincluster successfully reconciled") + toolchainClusterKey := runtimeclient.ObjectKey{ + Name: sourceClusterDetails.toolchainClusterName, + Namespace: targetClusterDetails.namespace, } - if useLetsEncrypt { - args = append(args, "--lets-encrypt") + if err := waitUntilToolchainClusterReady(ctx.CommandContext, targetClusterDetails.client, toolchainClusterKey, v.args.waitForReadyTimeout); err != nil { + ctx.Printlnf("The ToolchainCluster resource representing the %s in the %s cluster has not become ready.", sourceClusterType, sourceClusterType.TheOtherType()) + ctx.Printlnf("Please check the %s ToolchainCluster resource in the %s %s cluster.", toolchainClusterKey, targetClusterDetails.apiEndpoint, sourceClusterType.TheOtherType()) + return err } - term.Printlnf("Command to be called: bash %s\n", strings.Join(args, " ")) - bash := newCommand("bash", 
args...) - bash.Stdout = os.Stdout - bash.Stderr = os.Stderr - return bash.Run() + // -- end temporary logic + + return nil } -func downloadScript(term ioutils.Terminal) (*os.File, error) { - resp, err := http.Get(AddClusterScriptURL) - if err != nil { - return nil, errs.Wrapf(err, "unable to get add-script.sh") +func newRestClient(kubeConfigPath string) (*rest.RESTClient, error) { + restClientConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) + // those fields are required when using the rest client otherwise it throws and error + // see: https://github.com/kubernetes/client-go/blob/46965213e4561ad1b9c585d1c3551a0cc8d3fcd6/rest/config.go#L310-L315 + restClientConfig.ContentConfig = rest.ContentConfig{ + GroupVersion: &authv1.SchemeGroupVersion, + NegotiatedSerializer: scheme.Codecs, } - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return nil, fmt.Errorf("unable to get add-script.sh - response status %s", resp.Status) + if err != nil { + return nil, err } - defer func() { - if err := resp.Body.Close(); err != nil { - term.Printlnf(err.Error()) - } - }() - // Create the file - file, err := os.CreateTemp("", "add-cluster-*.sh") + restClient, err := rest.RESTClientFor(restClientConfig) if err != nil { return nil, err } - defer func() { - if err := file.Close(); err != nil { - term.Printlnf(err.Error()) - } - }() + return restClient, nil +} + +func generateKubeConfig(token, apiEndpoint, namespace string, insecureSkipTLSVerify bool) *clientcmdapi.Config { + // create apiConfig based on the secret content + return &clientcmdapi.Config{ + Contexts: map[string]*clientcmdapi.Context{ + "ctx": { + Cluster: "cluster", + Namespace: namespace, + AuthInfo: "auth", + }, + }, + CurrentContext: "ctx", + Clusters: map[string]*clientcmdapi.Cluster{ + "cluster": { + Server: apiEndpoint, + InsecureSkipTLSVerify: insecureSkipTLSVerify, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "auth": { + Token: token, + }, + }, + } +} - // Write the body to file - _, err = io.Copy(file, resp.Body) - return file, err +// waitForToolchainClusterSA waits for the toolchaincluster service account to be present +func waitForToolchainClusterSA(ctx *clicontext.CommandContext, cl runtimeclient.Client, toolchainClusterKey runtimeclient.ObjectKey, waitForReadyTimeout time.Duration) error { + return wait.PollImmediate(2*time.Second, waitForReadyTimeout, func() (bool, error) { + ctx.Printlnf("waiting for ToolchainCluster SA %s to become ready", toolchainClusterKey) + tc := &v1.ServiceAccount{} + if err := cl.Get(ctx, toolchainClusterKey, tc); err != nil { + if apierrors.IsNotFound(err) { + // keep looking for the resource + return false, nil + } + // exit if and error occurred + return false, err + } + // exit if we found the resource + return true, nil + }) } func waitUntilToolchainClusterReady(ctx *clicontext.CommandContext, cl runtimeclient.Client, toolchainClusterKey runtimeclient.ObjectKey, waitForReadyTimeout time.Duration) error { @@ -180,6 +312,11 @@ func waitUntilToolchainClusterReady(ctx *clicontext.CommandContext, cl runtimecl ctx.Printlnf("waiting for ToolchainCluster %s to become ready", toolchainClusterKey) tc := &toolchainv1alpha1.ToolchainCluster{} if err := cl.Get(ctx, toolchainClusterKey, tc); err != nil { + if apierrors.IsNotFound(err) { + // keep looking for the resource + return false, nil + } + // exit if and error occurred return false, err } @@ -217,42 +354,20 @@ func getToolchainClustersWithHostname(ctx context.Context, cl runtimeclient.Clie return clusters, nil } -type 
registerMemberData struct { - hostClusterClient runtimeclient.Client - memberClusterClient runtimeclient.Client - hostApiEndpoint string - memberApiEndpoint string - args registerMemberArgs - waitForReadyTimeout time.Duration +type clusterData struct { + client runtimeclient.Client + apiEndpoint string + namespace string + toolchainClusterName string + kubeConfig string } type registerMemberValidated struct { - registerMemberData - hostToolchainClusterName string - memberToolchainClusterName string - warnings []string - errors []string -} - -func dataFromArgs(ctx *extendedCommandContext, args registerMemberArgs, waitForReadyTimeout time.Duration) (*registerMemberData, error) { - hostApiEndpoint, hostClusterClient, err := getApiEndpointAndClient(ctx, args.hostKubeConfig) - if err != nil { - return nil, err - } - - memberApiEndpoint, memberClusterClient, err := getApiEndpointAndClient(ctx, args.memberKubeConfig) - if err != nil { - return nil, err - } - - return ®isterMemberData{ - args: args, - hostApiEndpoint: hostApiEndpoint, - memberApiEndpoint: memberApiEndpoint, - hostClusterClient: hostClusterClient, - memberClusterClient: memberClusterClient, - waitForReadyTimeout: waitForReadyTimeout, - }, nil + args registerMemberArgs + hostClusterData clusterData + memberClusterData clusterData + warnings []string + errors []string } func getApiEndpointAndClient(ctx *extendedCommandContext, kubeConfigPath string) (apiEndpoint string, cl runtimeclient.Client, err error) { @@ -276,25 +391,35 @@ func getApiEndpointAndClient(ctx *extendedCommandContext, kubeConfigPath string) return } -func (d *registerMemberData) validate(ctx *extendedCommandContext) (*registerMemberValidated, error) { - hostToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Host), d.hostApiEndpoint, "") +func validateArgs(ctx *extendedCommandContext, args registerMemberArgs) (*registerMemberValidated, error) { + hostApiEndpoint, hostClusterClient, err := getApiEndpointAndClient(ctx, args.hostKubeConfig) + if err != nil { + return nil, err + } + + memberApiEndpoint, memberClusterClient, err := getApiEndpointAndClient(ctx, args.memberKubeConfig) + if err != nil { + return nil, err + } + + hostToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Host), hostApiEndpoint, "") if err != nil { return nil, err } // figure out the name that will be given to our new ToolchainCluster representing the member in the host cluster. // This is the same name that the add-cluster.sh script will deduce and use. 
- membersInHost, err := getToolchainClustersWithHostname(ctx, d.hostClusterClient, d.memberApiEndpoint, d.args.hostNamespace) + membersInHost, err := getToolchainClustersWithHostname(ctx, hostClusterClient, memberApiEndpoint, args.hostNamespace) if err != nil { return nil, err } - memberToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Member), d.memberApiEndpoint, d.args.nameSuffix) + memberToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Member), memberApiEndpoint, args.nameSuffix) if err != nil { return nil, err } hostsInMember := &toolchainv1alpha1.ToolchainClusterList{} - if err = d.memberClusterClient.List(ctx, hostsInMember, runtimeclient.InNamespace(d.args.memberNamespace)); err != nil { + if err = memberClusterClient.List(ctx, hostsInMember, runtimeclient.InNamespace(args.memberNamespace)); err != nil { return nil, err } @@ -302,16 +427,16 @@ func (d *registerMemberData) validate(ctx *extendedCommandContext) (*registerMem var errors []string if len(hostsInMember.Items) > 1 { - errors = append(errors, fmt.Sprintf("member misconfigured: the member cluster (%s) is already registered with more than 1 host in namespace %s", d.memberApiEndpoint, d.args.memberNamespace)) + errors = append(errors, fmt.Sprintf("member misconfigured: the member cluster (%s) is already registered with more than 1 host in namespace %s", memberApiEndpoint, args.memberNamespace)) } else if len(hostsInMember.Items) == 1 { - if hostsInMember.Items[0].Spec.APIEndpoint != d.hostApiEndpoint { - errors = append(errors, fmt.Sprintf("the member is already registered with another host (%s) so registering it with the new one (%s) would result in an invalid configuration", hostsInMember.Items[0].Spec.APIEndpoint, d.hostApiEndpoint)) + if hostsInMember.Items[0].Spec.APIEndpoint != hostApiEndpoint { + errors = append(errors, fmt.Sprintf("the member is already registered with another host (%s) so registering it with the new one (%s) would result in an invalid configuration", hostsInMember.Items[0].Spec.APIEndpoint, hostApiEndpoint)) } if hostsInMember.Items[0].Name != hostToolchainClusterName { errors = append(errors, fmt.Sprintf("the host is already in the member namespace using a ToolchainCluster object with the name '%s' but the new registration would use a ToolchainCluster with the name '%s' which would lead to an invalid configuration", hostsInMember.Items[0].Name, hostToolchainClusterName)) } } - existingMemberToolchainCluster := findToolchainClusterForMember(membersInHost, d.memberApiEndpoint, d.args.memberNamespace) + existingMemberToolchainCluster := findToolchainClusterForMember(membersInHost, memberApiEndpoint, args.memberNamespace) if existingMemberToolchainCluster != nil { warnings = append(warnings, fmt.Sprintf("there already is a registered member for the same member API endpoint and operator namespace (%s), proceeding will overwrite the objects representing it in the host and member clusters", runtimeclient.ObjectKeyFromObject(existingMemberToolchainCluster))) if existingMemberToolchainCluster.Name != memberToolchainClusterName { @@ -320,11 +445,23 @@ func (d *registerMemberData) validate(ctx *extendedCommandContext) (*registerMem } return ®isterMemberValidated{ - registerMemberData: *d, - hostToolchainClusterName: hostToolchainClusterName, - memberToolchainClusterName: memberToolchainClusterName, - warnings: warnings, - errors: errors, + args: args, + hostClusterData: clusterData{ + client: hostClusterClient, + apiEndpoint: hostApiEndpoint, + 
namespace: args.hostNamespace, + toolchainClusterName: hostToolchainClusterName, + kubeConfig: args.hostKubeConfig, + }, + memberClusterData: clusterData{ + client: memberClusterClient, + apiEndpoint: memberApiEndpoint, + namespace: args.memberNamespace, + toolchainClusterName: memberToolchainClusterName, + kubeConfig: args.memberKubeConfig, + }, + warnings: warnings, + errors: errors, }, nil } @@ -351,35 +488,16 @@ func (v *registerMemberValidated) confirmationPrompt() ioutils.ConfirmationMessa return ioutils.WithMessagef(sb.String(), args...) } -func (v *registerMemberValidated) perform(ctx *extendedCommandContext, newCommand client.CommandCreator) error { +func (v *registerMemberValidated) perform(ctx *extendedCommandContext) error { // add the host entry to the member cluster first. We assume that there is just 1 toolchain cluster entry in the member // cluster (i.e. it just points back to the host), so there's no need to determine the number of entries with the same // API endpoint. - hostToolchainClusterKey := runtimeclient.ObjectKey{ - Name: v.hostToolchainClusterName, - Namespace: v.args.memberNamespace, - } - if err := runAddClusterScript(ctx, newCommand, configuration.Host, v.args.hostKubeConfig, v.args.hostNamespace, v.args.memberKubeConfig, v.args.memberNamespace, "", v.args.useLetsEncrypt); err != nil { - return err - } - - if err := waitUntilToolchainClusterReady(ctx.CommandContext, v.memberClusterClient, hostToolchainClusterKey, v.waitForReadyTimeout); err != nil { - ctx.Println("The ToolchainCluster resource representing the host in the member cluster has not become ready.") - ctx.Printlnf("Please check the %s ToolchainCluster resource in the %s member cluster.", hostToolchainClusterKey, v.memberApiEndpoint) - return err - } - - memberToolchainClusterKey := runtimeclient.ObjectKey{ - Name: v.memberToolchainClusterName, - Namespace: v.args.hostNamespace, - } - if err := runAddClusterScript(ctx, newCommand, configuration.Member, v.args.hostKubeConfig, v.args.hostNamespace, v.args.memberKubeConfig, v.args.memberNamespace, v.args.nameSuffix, v.args.useLetsEncrypt); err != nil { + if err := v.addCluster(ctx, configuration.Host); err != nil { return err } - if err := waitUntilToolchainClusterReady(ctx.CommandContext, v.hostClusterClient, memberToolchainClusterKey, v.waitForReadyTimeout); err != nil { - ctx.Println("The ToolchainCluster resource representing the member in the host cluster has not become ready.") - ctx.Printlnf("Please check the %s ToolchainCluster resource in the %s host cluster. 
Note also that there already exists %s ToolchainCluster resource in the member cluster.", memberToolchainClusterKey, v.hostApiEndpoint, hostToolchainClusterKey) + // add the member entry in the host cluster + if err := v.addCluster(ctx, configuration.Member); err != nil { return err } @@ -389,11 +507,11 @@ func (v *registerMemberValidated) perform(ctx *extendedCommandContext, newComman APIVersion: toolchainv1alpha1.GroupVersion.Identifier(), }, ObjectMeta: metav1.ObjectMeta{ - Name: memberToolchainClusterKey.Name, - Namespace: memberToolchainClusterKey.Namespace, + Name: v.memberClusterData.toolchainClusterName, + Namespace: v.hostClusterData.namespace, }, Spec: toolchainv1alpha1.SpaceProvisionerConfigSpec{ - ToolchainCluster: memberToolchainClusterKey.Name, + ToolchainCluster: v.memberClusterData.toolchainClusterName, Enabled: false, PlacementRoles: []string{ cluster.RoleLabel(cluster.Tenant), @@ -406,7 +524,7 @@ Modify and apply the following SpaceProvisionerConfig to the host cluster (%s) t of the spaces to the newly registered member cluster. Nothing will be deployed to the cluster until the SpaceProvisionerConfig.spec.enabled is set to true. -`, v.hostApiEndpoint)) +`, v.hostClusterData.apiEndpoint)) } func findToolchainClusterForMember(allToolchainClusters []toolchainv1alpha1.ToolchainCluster, memberAPIEndpoint, memberOperatorNamespace string) *toolchainv1alpha1.ToolchainCluster { diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go index 84fdbd7..db52b8c 100644 --- a/pkg/cmd/adm/register_member_test.go +++ b/pkg/cmd/adm/register_member_test.go @@ -2,6 +2,7 @@ package adm import ( "context" + "fmt" "path/filepath" "strings" "testing" @@ -11,8 +12,6 @@ import ( "github.com/codeready-toolchain/toolchain-common/pkg/cluster" "github.com/codeready-toolchain/toolchain-common/pkg/test" "github.com/ghodss/yaml" - "github.com/h2non/gock" - "github.com/kubesaw/ksctl/pkg/client" "github.com/kubesaw/ksctl/pkg/configuration" . "github.com/kubesaw/ksctl/pkg/test" "github.com/kubesaw/ksctl/pkg/utils" @@ -20,13 +19,13 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/util/homedir" - "k8s.io/utils/pointer" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -35,129 +34,36 @@ func TestRegisterMember(t *testing.T) { SetFileConfig(t, Host(), Member()) hostKubeconfig := PersistKubeConfigFile(t, HostKubeConfig()) memberKubeconfig := PersistKubeConfigFile(t, MemberKubeConfig()) - gock.New(AddClusterScriptDomain). - Get(AddClusterScriptPath). - Persist(). - Reply(200) - defer gock.OffAll() - - type CommandCreatorSetup struct { - Client runtimeclient.Client - Counter *int - NameSuffix string - ExpectedHostArgs []string - ExpectedMemberArgs []string - HostReady bool - MemberReady bool - AllowUpdates bool - } - hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager") - deployment := newDeployment(hostDeploymentName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - - // the command creator mocks the execution of the add-cluster.sh. We check that we're passing the correct arguments - // and we also create the expected ToolchainCluster objects. 
- commandCreator := func(setup CommandCreatorSetup) (cc client.CommandCreator, counter *int) { - if setup.Counter == nil { - counter = pointer.Int(0) - } else { - counter = setup.Counter - } - return NewCommandCreator(t, "echo", "bash", - func(t *testing.T, args ...string) { - t.Helper() - persist := func(tc *toolchainv1alpha1.ToolchainCluster) { - t.Helper() - if setup.AllowUpdates { - if err := setup.Client.Create(context.TODO(), tc); err != nil { - if errors.IsAlreadyExists(err) { - current := &toolchainv1alpha1.ToolchainCluster{} - require.NoError(t, setup.Client.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(tc), current)) - current.Spec = tc.Spec - current.Status = tc.Status - require.NoError(t, setup.Client.Update(context.TODO(), current)) - } else { - require.NoError(t, err) - } - } - } else { - require.NoError(t, setup.Client.Create(context.TODO(), tc)) - } - } - if *counter == 0 { - AssertFirstArgPrefixRestEqual("(.*)/add-cluster-(.*)", setup.ExpectedHostArgs...)(t, args...) - status := corev1.ConditionFalse - if setup.HostReady { - status = corev1.ConditionTrue - } - // there's always at most 1 toolchain cluster for the host in the member cluster - expectedHostToolchainClusterName, err := utils.GetToolchainClusterName("host", "https://cool-server.com", "") - require.NoError(t, err) - expectedHostToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: expectedHostToolchainClusterName, - Namespace: "toolchain-member-operator", - }, - Spec: toolchainv1alpha1.ToolchainClusterSpec{ - APIEndpoint: "https://cool-server.com", - }, - Status: toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: status, - }, - }, - }, - } - persist(expectedHostToolchainCluster) - } else { - AssertFirstArgPrefixRestEqual("(.*)/add-cluster-(.*)", setup.ExpectedMemberArgs...)(t, args...) 
- status := corev1.ConditionFalse - if setup.MemberReady { - status = corev1.ConditionTrue - } - expectedMemberToolchainClusterName, err := utils.GetToolchainClusterName("member", "https://cool-server.com", setup.NameSuffix) - require.NoError(t, err) - expectedMemberToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: expectedMemberToolchainClusterName, - Namespace: "toolchain-host-operator", - Labels: map[string]string{ - "namespace": "toolchain-member-operator", - }, - }, - Spec: toolchainv1alpha1.ToolchainClusterSpec{ - APIEndpoint: "https://cool-server.com", - }, - Status: toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: status, - }, - }, - }, - } - persist(expectedMemberToolchainCluster) - } - *counter = *counter + 1 - }), counter + toolchainClusterMemberSa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "toolchaincluster-member", + Namespace: "toolchain-member-operator", + }, } + toolchainClusterHostSa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "toolchaincluster-host", + Namespace: "toolchain-host-operator", + }, + } + + test.SetupGockForServiceAccounts(t, "https://cool-server.com", + types.NamespacedName{Name: toolchainClusterMemberSa.Name, Namespace: toolchainClusterMemberSa.Namespace}, + types.NamespacedName{Namespace: toolchainClusterHostSa.Namespace, Name: toolchainClusterHostSa.Name}, + ) + hostToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Host), "https://cool-server.com", "") + require.NoError(t, err) + memberToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Member), "https://cool-server.com", "") + require.NoError(t, err) t.Run("produces valid example SPC", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + // force the ready condition on the toolchaincluster created ( this is done by the tc controller in prod env ) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := newExtendedCommandContext(term, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) expectedExampleSPC := &toolchainv1alpha1.SpaceProvisionerConfig{ TypeMeta: metav1.TypeMeta{ @@ -178,11 +84,26 @@ func TestRegisterMember(t *testing.T) { } // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.NoError(t, err) - assert.Equal(t, 2, *counter) + // check the expected secrets are there with the kubeconfigs + // the member kubeconfig secret in the host namespace + verifyToolchainClusterSecret(t, fakeClient, toolchainClusterMemberSa.Name, 
"toolchain-host-operator", "toolchain-member-operator", memberToolchainClusterName) + // the host secret in the member namespace + verifyToolchainClusterSecret(t, fakeClient, toolchainClusterHostSa.Name, "toolchain-member-operator", "toolchain-host-operator", hostToolchainClusterName) + tcs := &toolchainv1alpha1.ToolchainClusterList{} + require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-host-operator"))) + assert.Len(t, tcs.Items, 1) + assert.Equal(t, memberToolchainClusterName, tcs.Items[0].Name) + // secret ref in tc matches + assert.Equal(t, toolchainClusterMemberSa.Name+"-"+memberToolchainClusterName, tcs.Items[0].Spec.SecretRef.Name) + require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-member-operator"))) + assert.Len(t, tcs.Items, 1) + assert.Equal(t, hostToolchainClusterName, tcs.Items[0].Name) + // secret ref in tc matches + assert.Equal(t, toolchainClusterHostSa.Name+"-"+hostToolchainClusterName, tcs.Items[0].Spec.SecretRef.Name) assert.Contains(t, term.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") actualExampleSPC := extractExampleSPCFromOutput(t, term.Output()) assert.Equal(t, *expectedExampleSPC, actualExampleSPC) @@ -191,22 +112,15 @@ func TestRegisterMember(t *testing.T) { t.Run("reports error when member ToolchainCluster is not ready in host", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterInNamespaceWithReadyCondition(fakeClient, "toolchain-member-operator") // we set to ready only the host toolchaincluster in member operator namespace ctx := newExtendedCommandContext(term, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: false, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) - assert.Equal(t, 2, *counter) tcs := &toolchainv1alpha1.ToolchainClusterList{} require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-host-operator"))) assert.Len(t, tcs.Items, 1) @@ -218,22 +132,15 @@ func TestRegisterMember(t *testing.T) { t.Run("reports error when host ToolchainCluster is not ready in member", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterInNamespaceWithReadyCondition(fakeClient, "toolchain-host-operator") // set to ready only the member toolchaincluster in host operator namespace ctx := newExtendedCommandContext(term, newClient) 
- addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: false, - MemberReady: false, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) - assert.Equal(t, 1, *counter) tcs := &toolchainv1alpha1.ToolchainClusterList{} require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-host-operator"))) assert.Empty(t, tcs.Items) @@ -245,22 +152,15 @@ func TestRegisterMember(t *testing.T) { t.Run("single toolchain in cluster", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := newExtendedCommandContext(term, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.NoError(t, err) - assert.Equal(t, 2, *counter) assert.Contains(t, term.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") assert.Contains(t, term.Output(), "kind: SpaceProvisionerConfig") }) @@ -268,22 +168,15 @@ func TestRegisterMember(t *testing.T) { t.Run("single toolchain in cluster with --lets-encrypt", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := newExtendedCommandContext(term, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--lets-encrypt"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", 
"--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--lets-encrypt"}, - }) // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, true)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, true)) // then require.NoError(t, err) - assert.Equal(t, 2, *counter) assert.Contains(t, term.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") assert.Contains(t, term.Output(), "kind: SpaceProvisionerConfig") }) @@ -291,7 +184,8 @@ func TestRegisterMember(t *testing.T) { t.Run("multiple toolchains in cluster", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := newExtendedCommandContext(term, newClient) preexistingToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -314,21 +208,13 @@ func TestRegisterMember(t *testing.T) { preexistingToolchainCluster.Name = "member-cool-server.com1" require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster.DeepCopy())) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - NameSuffix: "2", - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--multi-member", "2"}, - }) - // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "2")) + err := registerMemberCluster(ctx, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "2")) // then require.NoError(t, err) - assert.Equal(t, 2, *counter) + assert.Contains(t, term.Output(), "source cluster name: member-cool-server.com2") + assert.Contains(t, term.Output(), "The name of the target cluster: member-cool-server.com") assert.Contains(t, term.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") assert.Contains(t, term.Output(), "kind: SpaceProvisionerConfig") assert.Contains(t, term.Output(), "toolchainCluster: member-cool-server.com2") @@ -338,70 +224,40 @@ func TestRegisterMember(t *testing.T) { // given term1 := NewFakeTerminalWithResponse("Y") term2 := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx1 := newExtendedCommandContext(term1, newClient) ctx2 := newExtendedCommandContext(term2, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - AllowUpdates: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", 
memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) // when - err1 := registerMemberCluster(ctx1, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) - addClusterCommand, _ = commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - NameSuffix: "1", - AllowUpdates: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - Counter: counter, - }) - err2 := registerMemberCluster(ctx2, addClusterCommand, 1*time.Second, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "1")) + err1 := registerMemberCluster(ctx1, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err2 := registerMemberCluster(ctx2, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "1")) // then require.NoError(t, err1) - assert.Equal(t, 2, *counter) assert.Contains(t, term1.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") assert.Contains(t, term1.Output(), "kind: SpaceProvisionerConfig") require.Error(t, err2) assert.Equal(t, `Cannot proceed because of the following problems: -- the newly registered member cluster would have a different name (member-cool-server.com1) than the already existing one (member-cool-server.com) which would lead to invalid configuration. Consider using the --name-suffix parameter to match the existing member registration if you intend to just update it instead of creating a new registration`, err2.Error()) + - the newly registered member cluster would have a different name (member-cool-server.com1) than the already existing one (member-cool-server.com) which would lead to invalid configuration. 
Consider using the --name-suffix parameter to match the existing member registration if you intend to just update it instead of creating a new registration`, err2.Error()) }) t.Run("warns when updating existing registration", func(t *testing.T) { // given term1 := NewFakeTerminalWithResponse("Y") term2 := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx1 := newExtendedCommandContext(term1, newClient) ctx2 := newExtendedCommandContext(term2, newClient) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - AllowUpdates: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - }) // when - err1 := registerMemberCluster(ctx1, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) - counter1 := *counter - *counter = 0 - err2 := registerMemberCluster(ctx2, addClusterCommand, 1*time.Second, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "")) - counter2 := *counter + err1 := registerMemberCluster(ctx1, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err2 := registerMemberCluster(ctx2, newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig, false, "")) // then require.NoError(t, err1) - assert.Equal(t, 2, counter1) - assert.Equal(t, 2, counter2) assert.Contains(t, term1.Output(), "Modify and apply the following SpaceProvisionerConfig to the host cluster") assert.Contains(t, term1.Output(), "kind: SpaceProvisionerConfig") @@ -415,7 +271,8 @@ func TestRegisterMember(t *testing.T) { t.Run("Errors when member already registered with multiple hosts", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := newExtendedCommandContext(term, newClient) preexistingToolchainCluster1 := &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -454,30 +311,20 @@ func TestRegisterMember(t *testing.T) { require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster1.DeepCopy())) require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster2.DeepCopy())) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - NameSuffix: "1", - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubpconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--multi-member", 
"2"}, - }) - // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) assert.Contains(t, err.Error(), `Cannot proceed because of the following problems: -- member misconfigured: the member cluster (https://cool-server.com) is already registered with more than 1 host in namespace toolchain-member-operator`) - assert.Equal(t, 0, *counter) + - member misconfigured: the member cluster (https://cool-server.com) is already registered with more than 1 host in namespace toolchain-member-operator`) assert.NotContains(t, term.Output(), "kind: SpaceProvisionerConfig") }) t.Run("Errors when registering into another host", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t) ctx := newExtendedCommandContext(term, newClient) preexistingToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -498,30 +345,20 @@ func TestRegisterMember(t *testing.T) { } require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster.DeepCopy())) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - NameSuffix: "1", - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--multi-member", "2"}, - }) - // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) assert.Contains(t, err.Error(), `Cannot proceed because of the following problems: -- the member is already registered with another host (https://not-so-cool-server.com) so registering it with the new one (https://cool-server.com) would result in an invalid configuration`) - assert.Equal(t, 0, *counter) + - the member is already registered with another host (https://not-so-cool-server.com) so registering it with the new one (https://cool-server.com) would result in an invalid configuration`) assert.NotContains(t, term.Output(), "kind: SpaceProvisionerConfig") }) t.Run("Errors when host with different name already exists", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t) ctx := newExtendedCommandContext(term, newClient) preexistingToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -542,30 +379,20 @@ func TestRegisterMember(t *testing.T) { } require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster.DeepCopy())) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - NameSuffix: "1", - ExpectedHostArgs: 
[]string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--multi-member", "2"}, - }) - // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) assert.Contains(t, err.Error(), `Cannot proceed because of the following problems: -- the host is already in the member namespace using a ToolchainCluster object with the name 'host-with-weird-name' but the new registration would use a ToolchainCluster with the name 'host-cool-server.com' which would lead to an invalid configuration`) - assert.Equal(t, 0, *counter) + - the host is already in the member namespace using a ToolchainCluster object with the name 'host-with-weird-name' but the new registration would use a ToolchainCluster with the name 'host-cool-server.com' which would lead to an invalid configuration`) assert.NotContains(t, term.Output(), "kind: SpaceProvisionerConfig") }) t.Run("Errors when member with different name already exists", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := newFakeClientsFromRestConfig(t, deployment) + newClient, fakeClient := newFakeClientsFromRestConfig(t) ctx := newExtendedCommandContext(term, newClient) preexistingToolchainCluster := &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -589,104 +416,110 @@ func TestRegisterMember(t *testing.T) { } require.NoError(t, fakeClient.Create(context.TODO(), preexistingToolchainCluster.DeepCopy())) - addClusterCommand, counter := commandCreator(CommandCreatorSetup{ - Client: fakeClient, - HostReady: true, - MemberReady: true, - ExpectedHostArgs: []string{"--type", "host", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator"}, - ExpectedMemberArgs: []string{"--type", "member", "--host-kubeconfig", hostKubeconfig, "--host-ns", "toolchain-host-operator", "--member-kubeconfig", memberKubeconfig, "--member-ns", "toolchain-member-operator", "--multi-member", "2"}, - }) - // when - err := registerMemberCluster(ctx, addClusterCommand, 1*time.Second, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then require.Error(t, err) assert.Contains(t, err.Error(), `Cannot proceed because of the following problems: -- the newly registered member cluster would have a different name (member-cool-server.com) than the already existing one (member-with-weird-name) which would lead to invalid configuration. Consider using the --name-suffix parameter to match the existing member registration if you intend to just update it instead of creating a new registration`) - assert.Equal(t, 0, *counter) + - the newly registered member cluster would have a different name (member-cool-server.com) than the already existing one (member-with-weird-name) which would lead to invalid configuration. 
Consider using the --name-suffix parameter to match the existing member registration if you intend to just update it instead of creating a new registration`) assert.NotContains(t, term.Output(), "kind: SpaceProvisionerConfig") }) -} -func TestRunAddClusterScriptSuccess(t *testing.T) { - // given - SetFileConfig(t, Host(), Member()) - hostKubeconfig := PersistKubeConfigFile(t, HostKubeConfig()) - memberKubeconfig := PersistKubeConfigFile(t, MemberKubeConfig()) - gock.New(AddClusterScriptDomain). - Get(AddClusterScriptPath). - Persist(). - Reply(200) - defer gock.OffAll() - term := NewFakeTerminalWithResponse("Y") - - test := func(t *testing.T, clusterType configuration.ClusterType, nameSuffix string, letEncrypt bool, additionalExpectedArgs ...string) { + t.Run("reports error when member toolchaincluster ServiceAccount is not there", func(t *testing.T) { // given - expArgs := []string{"--type", clusterType.String(), "--host-kubeconfig", hostKubeconfig, "--host-ns", "host-ns", "--member-kubeconfig", memberKubeconfig, "--member-ns", "member-ns"} - expArgs = append(expArgs, additionalExpectedArgs...) - ocCommandCreator := NewCommandCreator(t, "echo", "bash", - AssertFirstArgPrefixRestEqual("(.*)/add-cluster-(.*)", expArgs...)) + term := NewFakeTerminalWithResponse("Y") + newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterHostSa) // we pre-provision only the host toolchaincluster ServiceAccount + mockCreateToolchainClusterWithReadyCondition(fakeClient) + ctx := newExtendedCommandContext(term, newClient) // when - err := runAddClusterScript(term, ocCommandCreator, clusterType, hostKubeconfig, "host-ns", memberKubeconfig, "member-ns", nameSuffix, letEncrypt) + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) // then - require.NoError(t, err) - // on Linux, the output contains `Command to be called: bash /tmp/add-cluster-` - // on macOS, the output contains something like `Command to be called: bash /var/folders/b8/wy8kq7_179l7yswz6gz6qx800000gp/T/add-cluster-369107288.sh` - assert.Contains(t, term.Output(), "Command to be called: bash ") - assert.Contains(t, term.Output(), "add-cluster-") - assert.Contains(t, term.Output(), strings.Join(expArgs, " ")) - } + require.Error(t, err) + assert.Contains(t, term.Output(), "The toolchain-member-operator/toolchaincluster-member ServiceAccount is not present in the member cluster.") + tcs := &toolchainv1alpha1.ToolchainClusterList{} + require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-host-operator"))) + assert.Empty(t, tcs.Items) + require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-member-operator"))) + assert.Len(t, tcs.Items, 1) + }) + + t.Run("reports error when host toolchaincluster ServiceAccount is not there", func(t *testing.T) { + // given + term := NewFakeTerminalWithResponse("Y") + newClient, fakeClient := newFakeClientsFromRestConfig(t) + ctx := newExtendedCommandContext(term, newClient) + + // when + err := registerMemberCluster(ctx, newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig, false)) + + // then + require.Error(t, err) + assert.Contains(t, term.Output(), "The toolchain-host-operator/toolchaincluster-host ServiceAccount is not present in the host cluster.") + tcs := &toolchainv1alpha1.ToolchainClusterList{} + require.NoError(t, fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-host-operator"))) + assert.Empty(t, tcs.Items) + require.NoError(t, 
fakeClient.List(context.TODO(), tcs, runtimeclient.InNamespace("toolchain-member-operator"))) + assert.Empty(t, tcs.Items) + }) +} - for _, clusterType := range configuration.ClusterTypes { - t.Run("for cluster name: "+clusterType.String(), func(t *testing.T) { - t.Run("single toolchain in cluster", func(t *testing.T) { - test(t, clusterType, "", false) - }) - t.Run("single toolchain in cluster with letsencrypt", func(t *testing.T) { - test(t, clusterType, "", true, "--lets-encrypt") - }) - t.Run("multiple toolchains in cluster", func(t *testing.T) { - test(t, clusterType, "asdf", false, "--multi-member", "asdf") - }) - t.Run("multiple toolchains in cluster with letsencrypt", func(t *testing.T) { - test(t, clusterType, "42", true, "--multi-member", "42", "--lets-encrypt") - }) - }) +func mockCreateToolchainClusterInNamespaceWithReadyCondition(fakeClient *test.FakeClient, namespace string) { + fakeClient.MockCreate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.CreateOption) error { + if obj, ok := obj.(*toolchainv1alpha1.ToolchainCluster); ok { + if obj.GetNamespace() == namespace { + obj.Status = toolchainv1alpha1.ToolchainClusterStatus{ + Conditions: []toolchainv1alpha1.Condition{ + { + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionTrue, + }, + }, + } + } + } + return fakeClient.Client.Create(ctx, obj, opts...) } } -func TestRunAddClusterScriptFailed(t *testing.T) { - // given - SetFileConfig(t, Host(), Member()) - hostKubeconfig := PersistKubeConfigFile(t, HostKubeConfig()) - memberKubeconfig := PersistKubeConfigFile(t, MemberKubeConfig()) - gock.New(AddClusterScriptDomain). - Get(AddClusterScriptPath). - Persist(). - Reply(404) - defer gock.OffAll() - - for _, clusterType := range configuration.ClusterTypes { - t.Run("for cluster name: "+clusterType.String(), func(t *testing.T) { - // given - expArgs := []string{"--type", clusterType.String(), "--host-kubeconfig", hostKubeconfig, "--host-ns", "host-ns", "--member-kubeconfig", memberKubeconfig, "--member-ns", "member-ns", "--lets-encrypt"} - ocCommandCreator := NewCommandCreator(t, "echo", "bash", - AssertFirstArgPrefixRestEqual("(.*)/add-cluster-(.*)", expArgs...)) - term := NewFakeTerminalWithResponse("Y") - - // when - err := runAddClusterScript(term, ocCommandCreator, clusterType, hostKubeconfig, "host-ns", memberKubeconfig, "member-ns", "", true) - - // then - require.Error(t, err) - assert.NotContains(t, term.Output(), "Command to be called") - }) +func mockCreateToolchainClusterWithReadyCondition(fakeClient *test.FakeClient) { + fakeClient.MockCreate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.CreateOption) error { + if obj, ok := obj.(*toolchainv1alpha1.ToolchainCluster); ok { + obj.Status = toolchainv1alpha1.ToolchainClusterStatus{ + Conditions: []toolchainv1alpha1.Condition{ + { + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionTrue, + }, + }, + } + } + return fakeClient.Client.Create(ctx, obj, opts...) 
} } +func verifyToolchainClusterSecret(t *testing.T, fakeClient *test.FakeClient, saName, secretNamespace, ctxNamespace, tcName string) { + secret := &corev1.Secret{} + secretName := fmt.Sprintf("%s-%s", saName, tcName) + require.NoError(t, fakeClient.Get(context.TODO(), runtimeclient.ObjectKey{Namespace: secretNamespace, Name: secretName}, secret)) + assert.NotEmpty(t, secret.Labels) + assert.Equal(t, tcName, secret.Labels[toolchainv1alpha1.ToolchainClusterLabel]) + assert.NotEmpty(t, secret.StringData["token"]) + require.Equal(t, fmt.Sprintf("token-secret-for-%s", saName), secret.StringData["token"]) + assert.NotEmpty(t, secret.StringData["kubeconfig"]) + apiConfig, err := clientcmd.Load([]byte(secret.StringData["kubeconfig"])) + require.NoError(t, err) + require.False(t, api.IsConfigEmpty(apiConfig)) + assert.Equal(t, "https://cool-server.com", apiConfig.Clusters["cluster"].Server) + assert.True(t, apiConfig.Clusters["cluster"].InsecureSkipTLSVerify) // by default the insecure flag is being set + assert.Equal(t, "cluster", apiConfig.Contexts["ctx"].Cluster) + assert.Equal(t, ctxNamespace, apiConfig.Contexts["ctx"].Namespace) + assert.NotEmpty(t, apiConfig.AuthInfos["auth"].Token) + require.Equal(t, fmt.Sprintf("token-secret-for-%s", saName), apiConfig.AuthInfos["auth"].Token) +} + func whenDeploymentThenUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { if deployment, ok := obj.(*appsv1.Deployment); ok { @@ -705,12 +538,11 @@ func newFakeClientsFromRestConfig(t *testing.T, initObjs ...runtime.Object) (new return fakeClient.Client.Update(ctx, obj, opts...) 
} return func(cfg *rest.Config) (runtimeclient.Client, error) { - assert.Contains(t, cfg.Host, "http") - assert.Contains(t, cfg.Host, "://") - assert.Contains(t, cfg.Host, ".com") - return fakeClient, nil - }, - fakeClient + assert.Contains(t, cfg.Host, "http") + assert.Contains(t, cfg.Host, "://") + assert.Contains(t, cfg.Host, ".com") + return fakeClient, nil + }, fakeClient } func extractExampleSPCFromOutput(t *testing.T, output string) toolchainv1alpha1.SpaceProvisionerConfig { @@ -738,6 +570,7 @@ func newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig string, useLetsE args.hostKubeConfig = hostKubeconfig args.memberKubeConfig = memberKubeconfig args.useLetsEncrypt = useLetsEncrypt + args.waitForReadyTimeout = 1 * time.Second return args } diff --git a/pkg/cmd/generate/cli_configs.go b/pkg/cmd/generate/cli_configs.go index f173441..c2a3f3d 100644 --- a/pkg/cmd/generate/cli_configs.go +++ b/pkg/cmd/generate/cli_configs.go @@ -193,7 +193,7 @@ func generateForCluster(ctx *generateContext, clusterType configuration.ClusterT saNamespace = sa.Namespace } ctx.Printlnf("Getting token for SA '%s' in namespace '%s'", sa.Name, saNamespace) - token, err := getServiceAccountToken(externalClient, types.NamespacedName{ + token, err := GetServiceAccountToken(externalClient, types.NamespacedName{ Namespace: saNamespace, Name: sa.Name}, ctx.tokenExpirationDays) if token == "" || err != nil { @@ -238,11 +238,11 @@ func buildClientFromKubeconfigFiles(ctx *generateContext, API string, kubeconfig return nil, fmt.Errorf("could not setup client from any of the provided kubeconfig files for the '%s' cluster", API) } -// getServiceAccountToken returns the SA's token or returns an error if none was found. +// GetServiceAccountToken returns the SA's token or returns an error if none was found. // NOTE: due to a changes in OpenShift 4.11, tokens are not listed as `secrets` in ServiceAccounts. 
// The recommended solution is to use the TokenRequest API when server version >= 4.11
// (see https://docs.openshift.com/container-platform/4.11/release_notes/ocp-4-11-release-notes.html#ocp-4-11-notable-technical-changes)
-func getServiceAccountToken(cl *rest.RESTClient, namespacedName types.NamespacedName, tokenExpirationDays uint) (string, error) {
+func GetServiceAccountToken(cl *rest.RESTClient, namespacedName runtimeclient.ObjectKey, tokenExpirationDays uint) (string, error) {
 	tokenRequest := &authv1.TokenRequest{
 		Spec: authv1.TokenRequestSpec{
 			ExpirationSeconds: pointer.Int64(int64(tokenExpirationDays * 24 * 60 * 60)),
diff --git a/pkg/cmd/generate/cli_configs_test.go b/pkg/cmd/generate/cli_configs_test.go
index 9aa1239..73932b2 100644
--- a/pkg/cmd/generate/cli_configs_test.go
+++ b/pkg/cmd/generate/cli_configs_test.go
@@ -236,7 +236,7 @@ func TestGetServiceAccountToken(t *testing.T) {
 	cl, err := newGockRESTClient("secret_token", "https://api.example.com")
 	require.NoError(t, err)
 	// when
-	actualToken, err := getServiceAccountToken(cl, types.NamespacedName{
+	actualToken, err := GetServiceAccountToken(cl, types.NamespacedName{
 		Namespace: "openshift-customer-monitoring",
 		Name:      "loki",
 	}, 365)
diff --git a/pkg/utils/util.go b/pkg/utils/util.go
index 864534d..22202d3 100644
--- a/pkg/utils/util.go
+++ b/pkg/utils/util.go
@@ -6,6 +6,8 @@ import (
 	"strings"
 )
 
+const K8sLabelWithoutSuffixMaxLength = 62 // the 63-character Kubernetes label limit minus 1 character reserved for a defaulted suffix
+
 // Contains checks if the given slice of strings contains the given string
 func Contains(slice []string, value string) bool {
 	for _, role := range slice {
@@ -37,8 +39,17 @@ func GetToolchainClusterName(clusterType, serverAPIEndpoint, suffix string) (str
 	// in the original script.
 	fixedLength := len(clusterType) + len(suffix) + 1
 
-	maxAllowedClusterHostNameLen := 62 - fixedLength // I think 62 is here, because we might default the suffix to "1" later on
+	maxAllowedClusterHostNameLen := K8sLabelWithoutSuffixMaxLength - fixedLength // leave room for the suffix, which may still be defaulted to "1" below
+
+	clusterHostName, err := getClusterHostName(serverAPIEndpoint, maxAllowedClusterHostNameLen, suffix)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s-%s", clusterType, clusterHostName), nil
+}
+
+func getClusterHostName(serverAPIEndpoint string, maxAllowedClusterHostNameLen int, suffix string) (string, error) {
 	clusterHostName, err := sanitizeEndpointForUsageAsName(serverAPIEndpoint)
 	if err != nil {
 		return "", fmt.Errorf("failed to sanitize the endpoint for naming purposes: %w", err)
@@ -51,8 +62,7 @@ func GetToolchainClusterName(clusterType, serverAPIEndpoint, suffix string) (str
 			suffix = "1"
 		}
 	}
-
-	return fmt.Sprintf("%s-%s%s", clusterType, clusterHostName, suffix), nil
+	return fmt.Sprintf("%s%s", clusterHostName, suffix), nil
 }
 
 func sanitizeEndpointForUsageAsName(apiEndpoint string) (string, error) {
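
A quick way to sanity-check the refactored naming helper outside the test suite is the minimal, illustrative snippet below. It is not part of the patch; the endpoint mirrors the one used in register_member_test.go, and the printed name is the one those tests assert on.

package main

import (
	"fmt"

	"github.com/kubesaw/ksctl/pkg/utils"
)

func main() {
	// With an empty suffix this is expected to print "member-cool-server.com",
	// i.e. the ToolchainCluster name asserted in the tests above.
	name, err := utils.GetToolchainClusterName("member", "https://cool-server.com", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(name)

	// A non-empty suffix is appended directly after the sanitized host name.
	withSuffix, err := utils.GetToolchainClusterName("member", "https://cool-server.com", "1")
	if err != nil {
		panic(err)
	}
	fmt.Println(withSuffix)
}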