Skip to content

Commit

Permalink
Rc1
Browse files Browse the repository at this point in the history
Signed-off-by: Feny Mehta <[email protected]>
  • Loading branch information
fbm3307 committed Sep 26, 2024
1 parent d5e5280 commit b6f3df1
Show file tree
Hide file tree
Showing 2 changed files with 64 additions and 84 deletions.
60 changes: 30 additions & 30 deletions pkg/cmd/adm/restart.go
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
package adm

import (
"context"
"fmt"
"os"

"github.com/kubesaw/ksctl/pkg/client"
Expand All @@ -21,16 +19,21 @@ import (

// NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config
// 1. If the command is run for host operator, it restarts the whole host operator.(it deletes olm based pods(host-operator pods),
// waits for the new deployment to come up, then uses rollout-restart command for non-olm based - registration-service)
// waits for the new pods to come up, then uses rollout-restart command for non-olm based - registration-service)
// 2. If the command is run for member operator, it restarts the whole member operator.(it deletes olm based pods(member-operator pods),
// waits for the new deployment to come up, then uses rollout-restart command for non-olm based deployments - webhooks)
// waits for the new pods to come up, then uses rollout-restart command for non-olm based deployments - webhooks)
func NewRestartCmd() *cobra.Command {
command := &cobra.Command{
Use: "restart <cluster-name>",
Short: "Restarts an operator",
Long: `Restarts the whole operator in the given cluster name.
It restarts the operator and checks the status of the deployment`,
Args: cobra.RangeArgs(0, 1),
Long: `Restarts the whole operator, it relies on the target cluster and fetches the cluster config
1. If the command is run for host operator, it restarts the whole host operator.
(it deletes olm based pods(host-operator pods),waits for the new pods to
come up, then uses rollout-restart command for non-olm based deployments - registration-service)
2. If the command is run for member operator, it restarts the whole member operator.
(it deletes olm based pods(member-operator pods),waits for the new pods
to come up, then uses rollout-restart command for non-olm based deployments - webhooks)`,
Args: cobra.ExactArgs(1),

Check warning on line 36 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L27-L36

Added lines #L27 - L36 were not covered by tests
RunE: func(cmd *cobra.Command, args []string) error {
term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout)
ctx := clicontext.NewCommandContext(term, client.DefaultNewClient)
Expand All @@ -41,9 +44,6 @@ func NewRestartCmd() *cobra.Command {
}

func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
if clusterNames == nil || len(clusterNames) != 1 {
return fmt.Errorf("please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`")
}
clusterName := clusterNames[0]
kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags))
Expand Down Expand Up @@ -83,18 +83,18 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
}

func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s \n", ns)
ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns)

olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns)
olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(ctx, cl, ns)
if err != nil {
return err
}

if len(olmDeploymentList.Items) == 0 {
fmt.Printf("OLM based deployment not found in %s", ns)
ctx.Printlnf("No OLM based deployment restart happened as Olm deployment found in namespace %s is 0", ns)

Check failure on line 94 in pkg/cmd/adm/restart.go

View workflow job for this annotation

GitHub Actions / GolangCI Lint

`happend` is a misspelling of `happened` (misspell)
} else {
for _, olmDeployment := range olmDeploymentList.Items {
fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment)
ctx.Printlnf("Proceeding to delete the Pods of %v", olmDeployment)

if err := deleteAndWaitForPods(ctx, cl, olmDeployment, f, ioStreams); err != nil {
return err

Check warning on line 100 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L100

Added line #L100 was not covered by tests
Expand All @@ -104,25 +104,25 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client,
if len(nonOlmDeploymentlist.Items) != 0 {
for _, nonOlmDeployment := range nonOlmDeploymentlist.Items {

fmt.Printf("Proceeding to restart the non-OLM deployment %v \n", nonOlmDeployment)
ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment)

if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil {
if err := restartNonOlmDeployments(ctx, nonOlmDeployment, f, ioStreams); err != nil {
return err

Check warning on line 110 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L110

Added line #L110 was not covered by tests
}
//check the rollout status
fmt.Printf("Checking the status of the rolled out deployment %v \n", nonOlmDeployment)
if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil {
ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment)
if err := checkRolloutStatus(ctx, f, ioStreams, "provider=codeready-toolchain"); err != nil {
return err

Check warning on line 115 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L115

Added line #L115 was not covered by tests
}
}
} else {
fmt.Printf("non-OLM based deployment not found in %s \n", ns)
ctx.Printlnf("No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace %s is 0", ns)

Check failure on line 119 in pkg/cmd/adm/restart.go

View workflow job for this annotation

GitHub Actions / GolangCI Lint

`happend` is a misspelling of `happened` (misspell)
}
return nil
}

func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
fmt.Printf("Listing the pods to be deleted \n")
ctx.Printlnf("Listing the pods to be deleted")
//get pods by label selector from the deployment
pods := corev1.PodList{}
selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
Expand All @@ -131,25 +131,25 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien
runtimeclient.InNamespace(deployment.Namespace)); err != nil {
return err
}
fmt.Printf("Starting to delete the pods \n")
ctx.Printlnf("Starting to delete the pods")
//delete pods
for _, pod := range pods.Items {
pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview
if err := cl.Delete(ctx, &pod); err != nil {
return err

Check warning on line 139 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L139

Added line #L139 was not covered by tests
}

fmt.Printf("Checking the status of the rolled out deployment %v", deployment)
ctx.Printlnf("Checking the status of the deleted pod's deployment %v", deployment)
//check the rollout status
if err := checkRolloutStatus(f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil {
if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil {
return err

Check warning on line 145 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L145

Added line #L145 was not covered by tests
}
}
return nil

}

func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {

o := kubectlrollout.NewRolloutRestartOptions(ioStreams)

Expand All @@ -162,11 +162,11 @@ func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, i
if err := o.Validate(); err != nil {
panic(err)

Check warning on line 163 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L163

Added line #L163 was not covered by tests
}
fmt.Printf("Running the rollout restart command for non-olm deployment %v", deployment)
ctx.Printlnf("Running the rollout restart command for non-olm deployment %v", deployment)
return o.RunRestart()
}

func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams)

if err := cmd.Complete(f, []string{"deployment"}); err != nil {
Expand All @@ -176,21 +176,21 @@ func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams
if err := cmd.Validate(); err != nil {
panic(err)

Check warning on line 177 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L177

Added line #L177 was not covered by tests
}
fmt.Printf("Running the Rollout status to check the status of the deployment")
ctx.Printlnf("Running the Rollout status to check the status of the deployment")
return cmd.Run()
}

func getExistingDeployments(cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) {
func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) {

olmDeployments := &appsv1.DeploymentList{}
if err := cl.List(context.TODO(), olmDeployments,
if err := cl.List(ctx, olmDeployments,
runtimeclient.InNamespace(ns),
runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil {
return nil, nil, err

Check warning on line 189 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L189

Added line #L189 was not covered by tests
}

nonOlmDeployments := &appsv1.DeploymentList{}
if err := cl.List(context.TODO(), nonOlmDeployments,
if err := cl.List(ctx, nonOlmDeployments,
runtimeclient.InNamespace(ns),
runtimeclient.MatchingLabels{"provider": "codeready-toolchain"}); err != nil {
return nil, nil, err

Check warning on line 196 in pkg/cmd/adm/restart.go

View check run for this annotation

Codecov / codecov/patch

pkg/cmd/adm/restart.go#L196

Added line #L196 was not covered by tests
Expand Down
88 changes: 34 additions & 54 deletions pkg/cmd/adm/restart_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,23 +54,23 @@ func TestRestartDeployment(t *testing.T) {
labelSelector: "provider=codeready-toolchain",
expectedOutput: "deployment.apps/registration-service restarted\n",
},
"OlmMemberDeployment": {
namespace: "toolchain-member-operator",
name: "member-operator-controller-manager",
labelKey: "kubesaw-control-plane",
labelValue: "kubesaw-controller-manager",
expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n",
labelSelector: "kubesaw-control-plane=kubesaw-controller-manager",
},
"NonOlmMemberDeployment": {
namespace: "toolchain-member-operator",
name: "member-webhooks",
labelKey: "provider",
labelValue: "codeready-toolchain",
expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n",
labelSelector: "provider=codeready-toolchain",
expectedOutput: "deployment.apps/member-webhooks restarted\n",
},
// "OlmMemberDeployment": {
// namespace: "toolchain-member-operator",
// name: "member-operator-controller-manager",
// labelKey: "kubesaw-control-plane",
// labelValue: "kubesaw-controller-manager",
// expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n",
// labelSelector: "kubesaw-control-plane=kubesaw-controller-manager",
// },
// "NonOlmMemberDeployment": {
// namespace: "toolchain-member-operator",
// name: "member-webhooks",
// labelKey: "provider",
// labelValue: "codeready-toolchain",
// expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n",
// labelSelector: "provider=codeready-toolchain",
// expectedOutput: "deployment.apps/member-webhooks restarted\n",
// },
}
for k, tc := range tests {
t.Run(k, func(t *testing.T) {
Expand Down Expand Up @@ -131,60 +131,40 @@ func TestRestartDeployment(t *testing.T) {
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
term := NewFakeTerminalWithResponse("Y")
pod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"))
deployment1.Labels = make(map[string]string)
deployment1.Labels[tc.labelKey] = tc.labelValue
newClient, fakeClient := NewFakeClients(t, deployment1, pod)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams)
if tc.labelValue == "kubesaw-controller-manager" {
require.NoError(t, err, "non-OLM based deployment not found in")
err2 := deleteAndWaitForPods(ctx, fakeClient, *deployment1, tf, streams)
require.NoError(t, err2)
require.NoError(t, err)
require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in")
require.Contains(t, term.Output(), "Proceeding to delete the Pods of")
require.Contains(t, term.Output(), "Listing the pods to be deleted")
require.Contains(t, term.Output(), "Starting to delete the pods")
require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment")
//checking the output from kubectl for rolloutstatus
require.Contains(t, buf.String(), tc.expectedOutput)
require.Contains(t, term.Output(), "No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace")

Check failure on line 150 in pkg/cmd/adm/restart_test.go

View workflow job for this annotation

GitHub Actions / GolangCI Lint

`happend` is a misspelling of `happened` (misspell)
} else if tc.labelValue == "codeready-toolchain" {
require.NoError(t, err, "OLM based deployment not found in")
err := restartNonOlmDeployments(*deployment1, tf, streams)
require.NoError(t, err)
//checking the output from kubectl
require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in")
require.Contains(t, term.Output(), "Proceeding to restart the non-OLM deployment ")
require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment")
require.Contains(t, term.Output(), "Checking the status of the rolled out deployment")
//checking the output from kubectl for rolloutstatus
require.Contains(t, buf.String(), tc.expectedOutput)
require.Contains(t, term.Output(), "No OLM based deployment restart happend as Olm deployment found in namespace")
}
err1 := checkRolloutStatus(tf, streams, tc.labelSelector)
require.NoError(t, err1)
//checking the output from kubectl
require.Contains(t, buf.String(), tc.expectedMsg)

})
}
}

func TestRestart(t *testing.T) {
t.Run("restart should fail if more than one clustername", func(t *testing.T) {
//given
toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com"))
deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
term := NewFakeTerminalWithResponse("Y")
newClient, _ := NewFakeClients(t, toolchainCluster, deployment)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restart(ctx, "host-cool-server.com", "member")

//then
require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`")
})
t.Run("restart should fail if zero clustername", func(t *testing.T) {
//given
toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com"))
deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
term := NewFakeTerminalWithResponse("Y")
newClient, _ := NewFakeClients(t, toolchainCluster, deployment)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restart(ctx)

//then
require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`")
})
t.Run("restart should succeed with 1 clustername", func(t *testing.T) {
//given
SetFileConfig(t, Host())
Expand Down

0 comments on commit b6f3df1

Please sign in to comment.