diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go
index 9625e90..d5e23b0 100644
--- a/pkg/cmd/adm/restart.go
+++ b/pkg/cmd/adm/restart.go
@@ -41,8 +41,8 @@ func NewRestartCmd() *cobra.Command {
 }
 
 func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
-	if clusterNames == nil {
-		return fmt.Errorf("please provide a cluster name to restart the operator e.g `ksctl adm restart host`")
+	if clusterNames == nil || len(clusterNames) != 1 {
+		return fmt.Errorf("please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`")
 	}
 	clusterName := clusterNames[0]
 	kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
@@ -76,7 +76,7 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
 	}
 
 	if !ctx.AskForConfirmation(
-		ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", clusterName, cfg.OperatorNamespace)) {
+		ioutils.WithMessagef("restart all the deployments in the cluster '%s' and namespace '%s' \n", clusterName, cfg.OperatorNamespace)) {
 		return nil
 	}
 
@@ -84,45 +84,46 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
 }
 
 func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
-	fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s", ns)
+	fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s \n", ns)
 	olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns)
 	if err != nil {
 		return err
 	}
-	if olmDeploymentList == nil {
+	if len(olmDeploymentList.Items) == 0 {
 		return fmt.Errorf("OLM based deployment not found in %s", ns)
-	}
-	for _, olmDeployment := range olmDeploymentList.Items {
-		fmt.Printf("Proceeding to delete the Pods of %v", olmDeployment)
+	} else {
+		for _, olmDeployment := range olmDeploymentList.Items {
+			fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment)
 
-		if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil {
-			return err
+			if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil {
+				return err
+			}
 		}
 	}
-	if nonOlmDeploymentlist != nil {
+	if len(nonOlmDeploymentlist.Items) != 0 {
 		for _, nonOlmDeployment := range nonOlmDeploymentlist.Items {
-			fmt.Printf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment)
+			fmt.Printf("Proceeding to restart the non-OLM deployment %v \n", nonOlmDeployment)
 			if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil {
 				return err
 			}
 			//check the rollout status
-			fmt.Printf("Checking the status of the rolled out deployment %v", nonOlmDeployment)
+			fmt.Printf("Checking the status of the rolled out deployment %v \n", nonOlmDeployment)
 			if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil {
 				return err
 			}
 		}
 	} else {
-		fmt.Printf("non-OLM based deployment not found in %s", ns)
+		fmt.Printf("non-OLM based deployment not found in %s \n", ns)
 	}
 	return nil
 }
 
 func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
-	fmt.Printf("Listing the pods to be deleted")
+	fmt.Printf("Listing the pods to be deleted \n")
 	//get pods by label selector from the deployment
 	pods := corev1.PodList{}
 	selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
@@ -131,7 +132,7 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym
 		runtimeclient.InNamespace(deployment.Namespace)); err != nil {
 		return err
 	}
-	fmt.Printf("Starting to delete the pods")
+	fmt.Printf("Starting to delete the pods \n")
 	//delete pods
 	for _, pod := range pods.Items {
 		pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview
diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go
index 291c4f1..0dd30b0 100644
--- a/pkg/cmd/adm/restart_test.go
+++ b/pkg/cmd/adm/restart_test.go
@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"testing"
 
+	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	. "github.com/kubesaw/ksctl/pkg/test"
 
 	"github.com/stretchr/testify/require"
@@ -25,84 +26,131 @@ import (
 
 func TestRestart(t *testing.T) {
 	// given
-	SetFileConfig(t, Host())
-	namespacedName := types.NamespacedName{
-		Namespace: "toolchain-host-operator",
-		Name:      "host-operator-controller-manager",
-	}
-	var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"}
-	deployment1 := newDeployment(namespacedName, 1)
-	ns := scheme.Codecs.WithoutConversion()
-	tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace)
-	tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
-
-	info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON)
-	encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder)
-	tf.Client = &RolloutRestartRESTClient{
-		RESTClient: &fake.RESTClient{
-			GroupVersion:         rolloutGroupVersionEncoder,
-			NegotiatedSerializer: ns,
-			Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
-				responseDeployment := &appsv1.Deployment{}
-				responseDeployment.Name = deployment1.Name
-				responseDeployment.Labels = make(map[string]string)
-				responseDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager"
-				body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment))))
-				return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil
-			}),
+	tests := map[string]struct {
+		namespace      string
+		name           string
+		labelKey       string
+		labelValue     string
+		expectedMsg    string
+		labelSelector  string
+		expectedOutput string
+	}{
+		"OlmHostDeployment": {
+			namespace:     "toolchain-host-operator",
+			name:          "host-operator-controller-manager",
+			labelKey:      "kubesaw-control-plane",
+			labelValue:    "kubesaw-controller-manager",
+			expectedMsg:   "deployment \"host-operator-controller-manager\" successfully rolled out\n",
+			labelSelector: "kubesaw-control-plane=kubesaw-controller-manager",
+		},
+		"NonOlmHostDeployment": {
+			namespace:      "toolchain-host-operator",
+			name:           "registration-service",
+			labelKey:       "provider",
+			labelValue:     "codeready-toolchain",
+			expectedMsg:    "deployment \"registration-service\" successfully rolled out\n",
+			labelSelector:  "provider=codeready-toolchain",
+			expectedOutput: "deployment.apps/registration-service restarted\n",
+		},
+		"OlmMemberDeployment": {
+			namespace:     "toolchain-member-operator",
+			name:          "member-operator-controller-manager",
+			labelKey:      "kubesaw-control-plane",
+			labelValue:    "kubesaw-controller-manager",
+			expectedMsg:   "deployment \"member-operator-controller-manager\" successfully rolled out\n",
+			labelSelector: "kubesaw-control-plane=kubesaw-controller-manager",
+		},
+		"NonOlmMemberDeployment": {
+			namespace:      "toolchain-member-operator",
+			name:           "member-webhooks",
+			labelKey:       "provider",
+			labelValue:     "codeready-toolchain",
+			expectedMsg:    "deployment \"member-webhooks\" successfully rolled out\n",
+			labelSelector:  "provider=codeready-toolchain",
+			expectedOutput: "deployment.apps/member-webhooks restarted\n",
 		},
 	}
-	tf.FakeDynamicClient.WatchReactionChain = nil
-	tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) {
-		fw := watch.NewFake()
-		dep := &appsv1.Deployment{}
-		dep.Name = deployment1.Name
-		dep.Status = appsv1.DeploymentStatus{
-			Replicas:            1,
-			UpdatedReplicas:     1,
-			ReadyReplicas:       1,
-			AvailableReplicas:   1,
-			UnavailableReplicas: 0,
-			Conditions: []appsv1.DeploymentCondition{{
-				Type: appsv1.DeploymentAvailable,
-			}},
-		}
-		dep.Labels = make(map[string]string)
-		dep.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager"
-		c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject())
-		if err != nil {
-			t.Errorf("unexpected err %s", err)
-		}
-		u := &unstructured.Unstructured{}
-		u.SetUnstructuredContent(c)
-		go fw.Add(u)
-		return true, fw, nil
-	})
-
-	//add comments that it is checking the output from kubectl
-	streams, _, buf, _ := genericclioptions.NewTestIOStreams()
-	t.Run("Rollout restart of non-olm deployments is successful", func(t *testing.T) {
-		// given
+	for k, tc := range tests {
+		t.Run(k, func(t *testing.T) {
+			//given
+			namespacedName := types.NamespacedName{
+				Namespace: tc.namespace,
+				Name:      tc.name,
+			}
+			var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"}
+			deployment1 := newDeployment(namespacedName, 1)
+			ns := scheme.Codecs.WithoutConversion()
+			tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace)
+			tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
 
-		err := restartNonOlmDeployments(*deployment1, tf, streams)
-		expectedOutput := "deployment.apps/" + deployment1.Name + " restarted\n"
-		require.NoError(t, err)
-		require.Contains(t, buf.String(), expectedOutput)
+			info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON)
+			encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder)
+			tf.Client = &RolloutRestartRESTClient{
+				RESTClient: &fake.RESTClient{
+					GroupVersion:         rolloutGroupVersionEncoder,
+					NegotiatedSerializer: ns,
+					Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+						responseDeployment := &appsv1.Deployment{}
+						responseDeployment.Name = deployment1.Name
+						responseDeployment.Labels = make(map[string]string)
+						responseDeployment.Labels[tc.labelKey] = tc.labelValue
+						body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment))))
+						return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil
+					}),
+				},
+			}
+			tf.FakeDynamicClient.WatchReactionChain = nil
+			tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) {
+				fw := watch.NewFake()
+				dep := &appsv1.Deployment{}
+				dep.Name = deployment1.Name
+				dep.Status = appsv1.DeploymentStatus{
+					Replicas:            1,
+					UpdatedReplicas:     1,
+					ReadyReplicas:       1,
+					AvailableReplicas:   1,
+					UnavailableReplicas: 0,
+					Conditions: []appsv1.DeploymentCondition{{
+						Type: appsv1.DeploymentAvailable,
+					}},
+				}
+				dep.Labels = make(map[string]string)
+				dep.Labels[tc.labelKey] = tc.labelValue
+				c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject())
+				if err != nil {
+					t.Errorf("unexpected err %s", err)
+				}
+				u := &unstructured.Unstructured{}
+				u.SetUnstructuredContent(c)
+				go fw.Add(u)
+				return true, fw, nil
+			})
 
-	})
+			streams, _, buf, _ := genericclioptions.NewTestIOStreams()
+			deployment := newDeployment(namespacedName, 1)
+			deployment.Labels = map[string]string{tc.labelKey: tc.labelValue}
+			term := NewFakeTerminalWithResponse("Y")
+			newClient, fakeClient := NewFakeClients(t, deployment)
+			ctx := clicontext.NewCommandContext(term, newClient)
 
-	t.Run("check rollout status of deployments is successful", func(t *testing.T) {
-		//when
-		err := checkRolloutStatus(tf, streams, "kubesaw-control-plane=kubesaw-controller-manager")
+			//when
+			err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams)
+			require.NoError(t, err)
 
-		//then
-		require.NoError(t, err)
+			err1 := checkRolloutStatus(tf, streams, tc.labelSelector)
+			require.NoError(t, err1)
+			//checking the output from kubectl
+			require.Contains(t, buf.String(), tc.expectedMsg)
 
-		expectedMsg := "deployment \"host-operator-controller-manager\" successfully rolled out\n"
-		require.Contains(t, buf.String(), expectedMsg)
-
-	})
+			if tc.labelValue == "codeready-toolchain" {
+				err := restartNonOlmDeployments(*deployment1, tf, streams)
+				require.NoError(t, err)
+				//checking the output from kubectl
+				require.Contains(t, buf.String(), tc.expectedOutput)
+			}
+		})
+	}
 }
 
 func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam