Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove the migration step to kubeconfig #426

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
23 changes: 10 additions & 13 deletions controllers/toolchaincluster/healthchecker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"fmt"
"testing"

toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
"github.com/codeready-toolchain/toolchain-common/pkg/cluster"
"github.com/codeready-toolchain/toolchain-common/pkg/test"
"github.com/stretchr/testify/require"
Expand All @@ -14,21 +13,20 @@ import (
)

func TestClusterHealthChecks(t *testing.T) {

// given
defer gock.Off()
tcNs := "test-namespace"
gock.New("http://cluster.com").
gock.New("https://cluster.com").
Get("healthz").
Persist().
Reply(200).
BodyString("ok")
gock.New("http://unstable.com").
gock.New("https://unstable.com").
Get("healthz").
Persist().
Reply(200).
BodyString("unstable")
gock.New("http://not-found.com").
gock.New("https://not-found.com").
Get("healthz").
Persist().
Reply(404)
Expand All @@ -41,25 +39,25 @@ func TestClusterHealthChecks(t *testing.T) {
}{
"HealthOkay": {
tcType: "stable",
apiEndPoint: "http://cluster.com",
apiEndPoint: "https://cluster.com",
healthCheck: true,
},
"HealthNotOkayButNoError": {
tcType: "unstable",
apiEndPoint: "http://unstable.com",
apiEndPoint: "https://unstable.com",
healthCheck: false,
},
"ErrorWhileDoingHealth": {
tcType: "Notfound",
apiEndPoint: "http://not-found.com",
apiEndPoint: "https://not-found.com",
healthCheck: false,
err: fmt.Errorf("the server could not find the requested resource"),
},
}
for k, tc := range tests {
t.Run(k, func(t *testing.T) {
//given
tcType, sec := newToolchainCluster(tc.tcType, tcNs, tc.apiEndPoint, toolchainv1alpha1.ToolchainClusterStatus{})
// given
tcType, sec := newToolchainCluster(t, tc.tcType, tcNs, tc.apiEndPoint)
cl := test.NewFakeClient(t, tcType, sec)
reset := setupCachedClusters(t, cl, tcType)
defer reset()
Expand All @@ -68,17 +66,16 @@ func TestClusterHealthChecks(t *testing.T) {
cacheClient, err := kubeclientset.NewForConfig(cachedTC.RestConfig)
require.NoError(t, err)

//when
// when
healthCheck, err := getClusterHealthStatus(context.TODO(), cacheClient)

//then
// then
require.Equal(t, tc.healthCheck, healthCheck)
if tc.err != nil {
require.EqualError(t, err, tc.err.Error())
} else {
require.NoError(t, err)
}

})
}
}
66 changes: 0 additions & 66 deletions controllers/toolchaincluster/toolchaincluster_controller.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package toolchaincluster

import (
"bytes"
"context"
"fmt"
"time"
Expand All @@ -13,8 +12,6 @@ import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
Expand Down Expand Up @@ -66,10 +63,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
return reconcile.Result{}, err
}

if err = r.migrateSecretToKubeConfig(ctx, toolchainCluster); err != nil {
return reconcile.Result{}, err
}

clientSet, err := kubeclientset.NewForConfig(cachedCluster.RestConfig)
if err != nil {
reqLogger.Error(err, "cannot create ClientSet for the ToolchainCluster")
Expand Down Expand Up @@ -151,62 +144,3 @@ func clusterNotReadyCondition() toolchainv1alpha1.Condition {
Message: healthzNotOk,
}
}

// migrateSecretToKubeConfig ensures that the secret referenced by the
// ToolchainCluster carries a "kubeconfig" entry synthesized from the legacy
// connection settings (token, API endpoint, operator-namespace label and the
// TLS-validation flag). The secret is written back only when the freshly
// composed kubeconfig differs from the stored one; a ToolchainCluster without
// a secret reference is a no-op.
func (r *Reconciler) migrateSecretToKubeConfig(ctx context.Context, tc *toolchainv1alpha1.ToolchainCluster) error {
	secretName := tc.Spec.SecretRef.Name
	if secretName == "" {
		// no secret referenced, nothing to migrate
		return nil
	}

	secret := &corev1.Secret{}
	if err := r.Client.Get(ctx, client.ObjectKey{Name: secretName, Namespace: tc.Namespace}, secret); err != nil {
		return err
	}

	// TLS verification is skipped only for the wildcard disable-all marker.
	skipTlsVerify := len(tc.Spec.DisabledTLSValidations) == 1 && tc.Spec.DisabledTLSValidations[0] == "*"

	// Spec.CABundle is deliberately ignored: new configurations are free to use
	// certificate data for their connections, but existing certificates are not
	// migrated.
	cfg := composeKubeConfigFromData(
		secret.Data["token"],
		tc.Spec.APIEndpoint,
		tc.Labels["namespace"],
		skipTlsVerify,
	)

	serialized, err := clientcmd.Write(cfg)
	if err != nil {
		return err
	}

	previous := secret.Data["kubeconfig"]
	secret.Data["kubeconfig"] = serialized
	if bytes.Equal(previous, serialized) {
		// already up to date — avoid a needless API write
		return nil
	}

	return r.Client.Update(ctx, secret)
}

// composeKubeConfigFromData assembles an in-memory kubeconfig from the given
// bearer token, API server endpoint and default namespace. When insecureTls is
// true, the cluster entry skips TLS certificate verification. Fixed entry
// names ("ctx", "cluster", "auth") are used because the config holds exactly
// one context, one cluster and one user.
func composeKubeConfigFromData(token []byte, apiEndpoint, operatorNamespace string, insecureTls bool) clientcmdapi.Config {
	const (
		contextName = "ctx"
		clusterName = "cluster"
		authName    = "auth"
	)

	cfg := clientcmdapi.Config{
		CurrentContext: contextName,
		Contexts:       map[string]*clientcmdapi.Context{},
		Clusters:       map[string]*clientcmdapi.Cluster{},
		AuthInfos:      map[string]*clientcmdapi.AuthInfo{},
	}
	cfg.Contexts[contextName] = &clientcmdapi.Context{
		Cluster:   clusterName,
		Namespace: operatorNamespace,
		AuthInfo:  authName,
	}
	cfg.Clusters[clusterName] = &clientcmdapi.Cluster{
		Server:                apiEndpoint,
		InsecureSkipTLSVerify: insecureTls,
	}
	cfg.AuthInfos[authName] = &clientcmdapi.AuthInfo{
		Token: string(token),
	}
	return cfg
}
77 changes: 13 additions & 64 deletions controllers/toolchaincluster/toolchaincluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,12 @@ import (
toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
"github.com/codeready-toolchain/toolchain-common/pkg/cluster"
"github.com/codeready-toolchain/toolchain-common/pkg/test"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/h2non/gock.v1"
"gotest.tools/assert/cmp"
corev1 "k8s.io/api/core/v1"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
Expand All @@ -32,24 +27,24 @@ func TestClusterControllerChecks(t *testing.T) {

defer gock.Off()
tcNs := "test-namespace"
gock.New("http://cluster.com").
gock.New("https://cluster.com").
Get("healthz").
Persist().
Reply(200).
BodyString("ok")
gock.New("http://unstable.com").
gock.New("https://unstable.com").
Get("healthz").
Persist().
Reply(200).
BodyString("unstable")
gock.New("http://not-found.com").
gock.New("https://not-found.com").
Get("healthz").
Persist().
Reply(404)

t.Run("ToolchainCluster not found", func(t *testing.T) {
// given
NotFound, sec := newToolchainCluster("notfound", tcNs, "http://not-found.com", toolchainv1alpha1.ToolchainClusterStatus{})
NotFound, sec := newToolchainCluster(t, "notfound", tcNs, "http://not-found.com")

cl := test.NewFakeClient(t, sec)
reset := setupCachedClusters(t, cl, NotFound)
Expand All @@ -66,7 +61,7 @@ func TestClusterControllerChecks(t *testing.T) {

t.Run("Error while getting ToolchainCluster", func(t *testing.T) {
// given
tc, sec := newToolchainCluster("tc", tcNs, "http://tc.com", toolchainv1alpha1.ToolchainClusterStatus{})
tc, sec := newToolchainCluster(t, "tc", tcNs, "http://tc.com")

cl := test.NewFakeClient(t, sec)
cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error {
Expand All @@ -87,7 +82,7 @@ func TestClusterControllerChecks(t *testing.T) {

t.Run("reconcile successful and requeued", func(t *testing.T) {
// given
stable, sec := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
stable, sec := newToolchainCluster(t, "stable", tcNs, "https://cluster.com")

cl := test.NewFakeClient(t, stable, sec)
reset := setupCachedClusters(t, cl, stable)
Expand All @@ -106,7 +101,7 @@ func TestClusterControllerChecks(t *testing.T) {

t.Run("toolchain cluster cache not found", func(t *testing.T) {
// given
unstable, _ := newToolchainCluster("unstable", tcNs, "http://unstable.com", toolchainv1alpha1.ToolchainClusterStatus{})
unstable, _ := newToolchainCluster(t, "unstable", tcNs, "http://unstable.com")

cl := test.NewFakeClient(t, unstable)
controller, req := prepareReconcile(unstable, cl, requeAfter)
Expand All @@ -121,7 +116,7 @@ func TestClusterControllerChecks(t *testing.T) {

t.Run("error while updating a toolchain cluster status on cache not found", func(t *testing.T) {
// given
stable, _ := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
stable, _ := newToolchainCluster(t, "stable", tcNs, "https://cluster.com")

cl := test.NewFakeClient(t, stable)
cl.MockStatusUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
Expand All @@ -141,7 +136,7 @@ func TestClusterControllerChecks(t *testing.T) {

t.Run("error while updating a toolchain cluster status when health-check failed", func(t *testing.T) {
// given
stable, sec := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
stable, sec := newToolchainCluster(t, "stable", tcNs, "https://cluster.com")
expectedErr := fmt.Errorf("my test error")
cl := test.NewFakeClient(t, stable, sec)
cl.MockStatusUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
Expand All @@ -162,46 +157,12 @@ func TestClusterControllerChecks(t *testing.T) {
require.Equal(t, reconcile.Result{}, recResult)
assertClusterStatus(t, cl, "stable")
})

t.Run("migrates connection settings to kubeconfig in secret", func(t *testing.T) {
// given
tc, secret := newToolchainCluster("tc", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
cl := test.NewFakeClient(t, tc, secret)
reset := setupCachedClusters(t, cl, tc)
defer reset()

controller, req := prepareReconcile(tc, cl, requeAfter)
expectedKubeConfig := composeKubeConfigFromData([]byte("mycooltoken"), "http://cluster.com", "test-namespace", true)

// when
_, err := controller.Reconcile(context.TODO(), req)
secretAfterReconcile := &corev1.Secret{}
require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(secret), secretAfterReconcile))
actualKubeConfig, loadErr := clientcmd.Load(secretAfterReconcile.Data["kubeconfig"])

// then
require.NoError(t, err)
require.NoError(t, loadErr)
assert.Contains(t, secretAfterReconcile.Data, "kubeconfig")

// we need to use this more complex equals, because we don't initialize the Extension fields (i.e. they're nil)
// while they're initialized and empty after deserialization, which causes the "normal" deep equals to fail.
result := cmp.DeepEqual(expectedKubeConfig, *actualKubeConfig,
cmpopts.IgnoreFields(clientcmdapi.Config{}, "Extensions"),
cmpopts.IgnoreFields(clientcmdapi.Preferences{}, "Extensions"),
cmpopts.IgnoreFields(clientcmdapi.Cluster{}, "Extensions"),
cmpopts.IgnoreFields(clientcmdapi.AuthInfo{}, "Extensions"),
cmpopts.IgnoreFields(clientcmdapi.Context{}, "Extensions"),
)()

assert.True(t, result.Success())
})
}

func TestGetClusterHealth(t *testing.T) {
t.Run("Check health default", func(t *testing.T) {
// given
stable, sec := newToolchainCluster("stable", "test-namespace", "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
stable, sec := newToolchainCluster(t, "stable", "test-namespace", "https://cluster.com")

cl := test.NewFakeClient(t, stable, sec)
reset := setupCachedClusters(t, cl, stable)
Expand All @@ -222,7 +183,7 @@ func TestGetClusterHealth(t *testing.T) {
})
t.Run("get health condition when health obtained is false ", func(t *testing.T) {
// given
stable, sec := newToolchainCluster("stable", "test-namespace", "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
stable, sec := newToolchainCluster(t, "stable", "test-namespace", "https://cluster.com")

cl := test.NewFakeClient(t, stable, sec)
reset := setupCachedClusters(t, cl, stable)
Expand All @@ -242,18 +203,6 @@ func TestGetClusterHealth(t *testing.T) {
assertClusterStatus(t, cl, "stable", clusterNotReadyCondition())
})
}
// TestComposeKubeConfig verifies that the kubeconfig composed from raw
// connection data wires the token, server URL and namespace into the entries
// referenced by the current context, with TLS verification left enabled.
func TestComposeKubeConfig(t *testing.T) {
	// when
	cfg := composeKubeConfigFromData([]byte("token"), "http://over.the.rainbow", "the-namespace", false)

	// then
	currentCtx := cfg.Contexts[cfg.CurrentContext]
	cluster := cfg.Clusters[currentCtx.Cluster]

	assert.Equal(t, "the-namespace", currentCtx.Namespace)
	assert.Equal(t, "token", cfg.AuthInfos[currentCtx.AuthInfo].Token)
	assert.Equal(t, "http://over.the.rainbow", cluster.Server)
	assert.False(t, cluster.InsecureSkipTLSVerify)
}

func setupCachedClusters(t *testing.T, cl *test.FakeClient, clusters ...*toolchainv1alpha1.ToolchainCluster) func() {
service := cluster.NewToolchainClusterServiceWithClient(cl, logf.Log, test.MemberOperatorNs, 0, func(config *rest.Config, options runtimeclient.Options) (runtimeclient.Client, error) {
Expand All @@ -275,8 +224,8 @@ func setupCachedClusters(t *testing.T, cl *test.FakeClient, clusters ...*toolcha
}
}

func newToolchainCluster(name, tcNs string, apiEndpoint string, status toolchainv1alpha1.ToolchainClusterStatus) (*toolchainv1alpha1.ToolchainCluster, *corev1.Secret) {
toolchainCluster, secret := test.NewToolchainClusterWithEndpoint(name, tcNs, "secret", apiEndpoint, status, map[string]string{"namespace": "test-namespace"})
// newToolchainCluster builds a test ToolchainCluster (together with its
// companion secret) pointing at the given API endpoint in the given namespace.
func newToolchainCluster(t *testing.T, name, tcNs string, apiEndpoint string) (*toolchainv1alpha1.ToolchainCluster, *corev1.Secret) {
	return test.NewToolchainClusterWithEndpoint(t, name, tcNs, "test-namespace", "secret", apiEndpoint, toolchainv1alpha1.ToolchainClusterStatus{}, false)
}

Expand Down
Loading
Loading