From f00f37d70ae38a8b53d28650b0398ea4d097ec7d Mon Sep 17 00:00:00 2001 From: Wallace Breza Date: Fri, 17 Nov 2023 15:51:15 -0800 Subject: [PATCH] AKS Service Target - Overriding KUBECONFIG handling (#2488) Adds support for custom KUBECONFIG setup. --- cli/azd/pkg/project/service_config_test.go | 5 +- cli/azd/pkg/project/service_target_aks.go | 283 +++++++++++------- .../pkg/project/service_target_aks_test.go | 147 +++++++-- cli/azd/pkg/tools/azcli/managed_clusters.go | 27 ++ cli/azd/pkg/tools/kubectl/kube_config.go | 41 +-- cli/azd/pkg/tools/kubectl/kube_config_test.go | 19 +- cli/azd/pkg/tools/kubectl/kubectl.go | 22 +- cli/azd/pkg/tools/kubectl/models.go | 6 +- cli/azd/pkg/tools/kubectl/util.go | 5 +- 9 files changed, 375 insertions(+), 180 deletions(-) diff --git a/cli/azd/pkg/project/service_config_test.go b/cli/azd/pkg/project/service_config_test.go index a65535b3f0e..408e6740518 100644 --- a/cli/azd/pkg/project/service_config_test.go +++ b/cli/azd/pkg/project/service_config_test.go @@ -217,8 +217,9 @@ func createTestServiceConfig(path string, host ServiceTargetKind, language Servi Language: language, RelativePath: filepath.Join(path), Project: &ProjectConfig{ - Name: "Test-App", - Path: ".", + Name: "Test-App", + Path: ".", + EventDispatcher: ext.NewEventDispatcher[ProjectLifecycleEventArgs](), }, EventDispatcher: ext.NewEventDispatcher[ServiceLifecycleEventArgs](), } diff --git a/cli/azd/pkg/project/service_target_aks.go b/cli/azd/pkg/project/service_target_aks.go index 73f16c6b484..bb3414dde9c 100644 --- a/cli/azd/pkg/project/service_target_aks.go +++ b/cli/azd/pkg/project/service_target_aks.go @@ -13,6 +13,9 @@ import ( "github.com/azure/azure-dev/cli/azd/pkg/async" "github.com/azure/azure-dev/cli/azd/pkg/azure" "github.com/azure/azure-dev/cli/azd/pkg/environment" + "github.com/azure/azure-dev/cli/azd/pkg/ext" + "github.com/azure/azure-dev/cli/azd/pkg/input" + "github.com/azure/azure-dev/cli/azd/pkg/output" "github.com/azure/azure-dev/cli/azd/pkg/tools" "github.com/azure/azure-dev/cli/azd/pkg/tools/azcli" "github.com/azure/azure-dev/cli/azd/pkg/tools/kubectl" @@ -55,7 +58,9 @@ type AksServiceOptions struct { type aksTarget struct { env *environment.Environment envManager environment.Manager + console input.Console managedClustersService azcli.ManagedClustersService + resourceManager ResourceManager kubectl kubectl.KubectlCli containerHelper *ContainerHelper } @@ -64,14 +69,18 @@ type aksTarget struct { func NewAksTarget( env *environment.Environment, envManager environment.Manager, + console input.Console, managedClustersService azcli.ManagedClustersService, + resourceManager ResourceManager, kubectlCli kubectl.KubectlCli, containerHelper *ContainerHelper, ) ServiceTarget { return &aksTarget{ env: env, envManager: envManager, + console: console, managedClustersService: managedClustersService, + resourceManager: resourceManager, kubectl: kubectlCli, containerHelper: containerHelper, } @@ -88,9 +97,29 @@ func (t *aksTarget) RequiredExternalTools(ctx context.Context) []tools.ExternalT // Initializes the AKS service target func (t *aksTarget) Initialize(ctx context.Context, serviceConfig *ServiceConfig) error { - // TODO: At some point in the future this an opportunity for the AKS target to - // subscript to a post-provision event to allow additional cluster configuration - // outside of the bicep provisioning. + // Ensure that the k8s context has been configured by the time a deploy operation is performed. 
+ // We attach to "postprovision" so that any predeploy or postprovision hooks can take advantage of the configuration + err := serviceConfig.Project.AddHandler( + "postprovision", + func(ctx context.Context, args ProjectLifecycleEventArgs) error { + return t.setK8sContext(ctx, serviceConfig, "postprovision") + }, + ) + + if err != nil { + return fmt.Errorf("failed adding postprovision handler, %w", err) + } + + // Ensure that the k8s context has been configured by the time a deploy operation is performed. + // We attach to "predeploy" so that any predeploy hooks can take advantage of the configuration + err = serviceConfig.AddHandler("predeploy", func(ctx context.Context, args ServiceLifecycleEventArgs) error { + return t.setK8sContext(ctx, serviceConfig, "predeploy") + }) + + if err != nil { + return fmt.Errorf("failed adding predeploy handler, %w", err) + } + return nil } @@ -126,92 +155,23 @@ func (t *aksTarget) Deploy( return } - // Login to AKS cluster - clusterName, has := t.env.LookupEnv(environment.AksClusterEnvVarName) - if !has { - task.SetError(fmt.Errorf( - "could not determine AKS cluster, ensure %s is set as an output of your infrastructure", - environment.AksClusterEnvVarName, - )) - return - } - - log.Printf("getting AKS credentials for cluster '%s'\n", clusterName) - task.SetProgress(NewServiceProgress("Getting AKS credentials")) - clusterCreds, err := t.managedClustersService.GetAdminCredentials( - ctx, - targetResource.SubscriptionId(), - targetResource.ResourceGroupName(), - clusterName, - ) - if err != nil { - task.SetError(fmt.Errorf( - "failed retrieving cluster admin credentials. Ensure your cluster has been configured to support admin credentials, %w", - err, - )) - return - } - - if len(clusterCreds.Kubeconfigs) == 0 { - task.SetError(fmt.Errorf( - "cluster credentials is empty. Ensure your cluster has been configured to support admin credentials. 
, %w", - err, - )) - return - } - - // The kubeConfig that we care about will also be at position 0 - // I don't know if there is a valid use case where this credential results would container multiple configs - task.SetProgress(NewServiceProgress("Configuring k8s config context")) - err = t.configureK8sContext(ctx, clusterName, clusterCreds.Kubeconfigs[0]) - if err != nil { - task.SetError(err) - return - } - // Login, tag & push container image to ACR containerDeployTask := t.containerHelper.Deploy(ctx, serviceConfig, packageOutput, targetResource) syncProgress(task, containerDeployTask.Progress()) - _, err = containerDeployTask.Await() - if err != nil { - task.SetError(err) - return - } - - namespace := t.getK8sNamespace(serviceConfig) - - task.SetProgress(NewServiceProgress("Creating k8s namespace")) - namespaceResult, err := t.kubectl.CreateNamespace( - ctx, - namespace, - &kubectl.KubeCliFlags{ - DryRun: kubectl.DryRunTypeClient, - Output: kubectl.OutputTypeYaml, - }, - ) - if err != nil { - task.SetError(fmt.Errorf("failed creating kube namespace: %w", err)) - return - } - - _, err = t.kubectl.ApplyWithStdIn(ctx, namespaceResult.Stdout, nil) - if err != nil { - task.SetError(fmt.Errorf("failed applying kube namespace: %w", err)) - return - } + // Sync environment + t.kubectl.SetEnv(t.env.Dotenv()) task.SetProgress(NewServiceProgress("Applying k8s manifests")) - t.kubectl.SetEnv(t.env.Dotenv()) deploymentPath := serviceConfig.K8s.DeploymentPath if deploymentPath == "" { deploymentPath = defaultDeploymentPath } - err = t.kubectl.Apply( + err := t.kubectl.Apply( ctx, filepath.Join(serviceConfig.RelativePath, deploymentPath), - &kubectl.KubeCliFlags{Namespace: namespace}, + nil, ) if err != nil { task.SetError(fmt.Errorf("failed applying kube manifests: %w", err)) @@ -226,7 +186,7 @@ func (t *aksTarget) Deploy( // It is not a requirement for a AZD deploy to contain a deployment object // If we don't find any deployment within the namespace we will continue task.SetProgress(NewServiceProgress("Verifying deployment")) - deployment, err := t.waitForDeployment(ctx, namespace, deploymentName) + deployment, err := t.waitForDeployment(ctx, deploymentName) if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { task.SetError(err) return @@ -271,8 +231,6 @@ func (t *aksTarget) Endpoints( serviceConfig *ServiceConfig, targetResource *environment.TargetResource, ) ([]string, error) { - namespace := t.getK8sNamespace(serviceConfig) - serviceName := serviceConfig.K8s.Service.Name if serviceName == "" { serviceName = serviceConfig.Name @@ -285,14 +243,14 @@ func (t *aksTarget) Endpoints( // Find endpoints for any matching services // These endpoints would typically be internal cluster accessible endpoints - serviceEndpoints, err := t.getServiceEndpoints(ctx, serviceConfig, namespace, serviceName) + serviceEndpoints, err := t.getServiceEndpoints(ctx, serviceConfig, serviceName) if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { return nil, fmt.Errorf("failed retrieving service endpoints, %w", err) } // Find endpoints for any matching ingress controllers // These endpoints would typically be publicly accessible endpoints - ingressEndpoints, err := t.getIngressEndpoints(ctx, serviceConfig, namespace, ingressName) + ingressEndpoints, err := t.getIngressEndpoints(ctx, serviceConfig, ingressName) if err != nil && !errors.Is(err, kubectl.ErrResourceNotFound) { return nil, fmt.Errorf("failed retrieving ingress endpoints, %w", err) } @@ -314,46 +272,114 @@ func (t *aksTarget) 
validateTargetResource( return nil } +func (t *aksTarget) ensureClusterContext( + ctx context.Context, + serviceConfig *ServiceConfig, + targetResource *environment.TargetResource, + defaultNamespace string, +) (string, error) { + kubeConfigPath := t.env.Getenv(kubectl.KubeConfigEnvVarName) + if kubeConfigPath != "" { + return kubeConfigPath, nil + } + + // Login to AKS cluster + clusterName, has := t.env.LookupEnv(environment.AksClusterEnvVarName) + if !has { + return "", fmt.Errorf( + "could not determine AKS cluster, ensure %s is set as an output of your infrastructure", + environment.AksClusterEnvVarName, + ) + } + + log.Printf("getting AKS credentials for cluster '%s'\n", clusterName) + clusterCreds, err := t.managedClustersService.GetUserCredentials( + ctx, + targetResource.SubscriptionId(), + targetResource.ResourceGroupName(), + clusterName, + ) + if err != nil { + return "", fmt.Errorf( + "failed retrieving cluster admin credentials. Ensure your cluster has been configured to support admin credentials, %w", + err, + ) + } + + if len(clusterCreds.Kubeconfigs) == 0 { + return "", fmt.Errorf( + "cluster credentials is empty. Ensure your cluster has been configured to support admin credentials. , %w", + err, + ) + } + + // The kubeConfig that we care about will also be at position 0 + // I don't know if there is a valid use case where this credential results would container multiple configs + kubeConfigPath, err = t.configureK8sContext(ctx, clusterName, defaultNamespace, clusterCreds.Kubeconfigs[0]) + if err != nil { + return "", err + } + + return kubeConfigPath, nil +} + +// Ensures the k8s namespace exists otherwise creates it +func (t *aksTarget) ensureNamespace(ctx context.Context, namespace string) error { + namespaceResult, err := t.kubectl.CreateNamespace( + ctx, + namespace, + &kubectl.KubeCliFlags{ + DryRun: kubectl.DryRunTypeClient, + Output: kubectl.OutputTypeYaml, + }, + ) + if err != nil { + return fmt.Errorf("failed creating kube namespace: %w", err) + } + + _, err = t.kubectl.ApplyWithStdIn(ctx, namespaceResult.Stdout, nil) + if err != nil { + return fmt.Errorf("failed applying kube namespace: %w", err) + } + + return nil +} + func (t *aksTarget) configureK8sContext( ctx context.Context, clusterName string, + namespace string, credentialResult *armcontainerservice.CredentialResult, -) error { +) (string, error) { kubeConfigManager, err := kubectl.NewKubeConfigManager(t.kubectl) if err != nil { - return err + return "", err } kubeConfig, err := kubectl.ParseKubeConfig(ctx, credentialResult.Value) if err != nil { - return fmt.Errorf( + return "", fmt.Errorf( "failed parsing kube config. Ensure your configuration is valid yaml. %w", err, ) } - if err := kubeConfigManager.SaveKubeConfig(ctx, clusterName, kubeConfig); err != nil { - return fmt.Errorf( - "failed saving kube config. Ensure write permissions to your local ~/.kube directory. %w", - err, - ) - } - - if err := kubeConfigManager.MergeConfigs(ctx, "config", "config", clusterName); err != nil { - return fmt.Errorf( - "failed merging kube configs. Verify your k8s configuration is valid. 
%w", - err, - ) + // Set default namespace for the context + // This avoids having to specify the namespace for every kubectl command + kubeConfig.Contexts[0].Context.Namespace = namespace + kubeConfigPath, err := kubeConfigManager.AddOrUpdateContext(ctx, clusterName, kubeConfig) + if err != nil { + return "", fmt.Errorf("failed adding/updating kube context, %w", err) } if _, err := t.kubectl.ConfigUseContext(ctx, clusterName, nil); err != nil { - return fmt.Errorf( + return "", fmt.Errorf( "failed setting kube context '%s'. Ensure the specified context exists. %w", clusterName, err, ) } - return nil + return kubeConfigPath, nil } // Finds a deployment using the specified deploymentNameFilter string @@ -361,13 +387,12 @@ func (t *aksTarget) configureK8sContext( // Additionally confirms rollout is complete by checking the rollout status func (t *aksTarget) waitForDeployment( ctx context.Context, - namespace string, deploymentNameFilter string, ) (*kubectl.Deployment, error) { // The deployment can appear like it has succeeded when a previous deployment // was already in place. deployment, err := kubectl.WaitForResource( - ctx, t.kubectl, namespace, kubectl.ResourceTypeDeployment, + ctx, t.kubectl, kubectl.ResourceTypeDeployment, func(deployment *kubectl.Deployment) bool { return strings.Contains(deployment.Metadata.Name, deploymentNameFilter) }, @@ -382,9 +407,7 @@ func (t *aksTarget) waitForDeployment( // Check the rollout status // This can be a long operation when the deployment is in a failed state such as an ImagePullBackOff loop - _, err = t.kubectl.RolloutStatus(ctx, deployment.Metadata.Name, &kubectl.KubeCliFlags{ - Namespace: namespace, - }) + _, err = t.kubectl.RolloutStatus(ctx, deployment.Metadata.Name, nil) if err != nil { return nil, err } @@ -396,11 +419,10 @@ func (t *aksTarget) waitForDeployment( // Waits until the ingress LoadBalancer has assigned a valid IP address func (t *aksTarget) waitForIngress( ctx context.Context, - namespace string, ingressNameFilter string, ) (*kubectl.Ingress, error) { return kubectl.WaitForResource( - ctx, t.kubectl, namespace, kubectl.ResourceTypeIngress, + ctx, t.kubectl, kubectl.ResourceTypeIngress, func(ingress *kubectl.Ingress) bool { return strings.Contains(ingress.Metadata.Name, ingressNameFilter) }, @@ -420,11 +442,10 @@ func (t *aksTarget) waitForIngress( // Waits until the service is available func (t *aksTarget) waitForService( ctx context.Context, - namespace string, serviceNameFilter string, ) (*kubectl.Service, error) { return kubectl.WaitForResource( - ctx, t.kubectl, namespace, kubectl.ResourceTypeService, + ctx, t.kubectl, kubectl.ResourceTypeService, func(service *kubectl.Service) bool { return strings.Contains(service.Metadata.Name, serviceNameFilter) }, @@ -448,15 +469,14 @@ func (t *aksTarget) waitForService( ) } -// Retrieve any service endpoints for the specified namespace and serviceNameFilter +// Retrieve any service endpoints for the specified serviceNameFilter // Supports service types for LoadBalancer and ClusterIP func (t *aksTarget) getServiceEndpoints( ctx context.Context, serviceConfig *ServiceConfig, - namespace string, serviceNameFilter string, ) ([]string, error) { - service, err := t.waitForService(ctx, namespace, serviceNameFilter) + service, err := t.waitForService(ctx, serviceNameFilter) if err != nil { return nil, err } @@ -475,15 +495,14 @@ func (t *aksTarget) getServiceEndpoints( return endpoints, nil } -// Retrieve any ingress endpoints for the specified namespace and serviceNameFilter +// Retrieve any 
ingress endpoints for the specified serviceNameFilter // Supports service types for LoadBalancer, supports Hosts and/or IP address func (t *aksTarget) getIngressEndpoints( ctx context.Context, serviceConfig *ServiceConfig, - namespace string, resourceFilter string, ) ([]string, error) { - ingress, err := t.waitForIngress(ctx, namespace, resourceFilter) + ingress, err := t.waitForIngress(ctx, resourceFilter) if err != nil { return nil, err } @@ -501,7 +520,7 @@ func (t *aksTarget) getIngressEndpoints( if ingress.Spec.Rules[index].Host == nil { baseUrl = fmt.Sprintf("%s://%s", protocol, resource.Ip) } else { - baseUrl = fmt.Sprintf("%s://%s", *ingress.Spec.Rules[index].Host, resource.Ip) + baseUrl = fmt.Sprintf("%s://%s", protocol, *ingress.Spec.Rules[index].Host) } endpointUrl, err := url.JoinPath(baseUrl, serviceConfig.K8s.Ingress.RelativePath) @@ -523,3 +542,39 @@ func (t *aksTarget) getK8sNamespace(serviceConfig *ServiceConfig) string { return namespace } + +func (t *aksTarget) setK8sContext(ctx context.Context, serviceConfig *ServiceConfig, eventName ext.Event) error { + t.kubectl.SetEnv(t.env.Dotenv()) + hasCustomKubeConfig := false + + // If a KUBECONFIG env var is set, use it. + kubeConfigPath := t.env.Getenv(kubectl.KubeConfigEnvVarName) + if kubeConfigPath != "" { + t.kubectl.SetKubeConfig(kubeConfigPath) + hasCustomKubeConfig = true + } + + targetResource, err := t.resourceManager.GetTargetResource(ctx, t.env.GetSubscriptionId(), serviceConfig) + if err != nil { + return err + } + + defaultNamespace := t.getK8sNamespace(serviceConfig) + _, err = t.ensureClusterContext(ctx, serviceConfig, targetResource, defaultNamespace) + if err != nil { + return err + } + + err = t.ensureNamespace(ctx, defaultNamespace) + if err != nil { + return err + } + + // Display message to the user when we detect they are using a non-default KUBECONFIG configuration + // In standard AZD AKS deployment users should not typically need to set a custom KUBECONFIG + if hasCustomKubeConfig && eventName == "predeploy" { + t.console.Message(ctx, output.WithWarningFormat("Using KUBECONFIG @ %s\n", kubeConfigPath)) + } + + return nil +} diff --git a/cli/azd/pkg/project/service_target_aks_test.go b/cli/azd/pkg/project/service_target_aks_test.go index a3667de35d1..f5a72b8b459 100644 --- a/cli/azd/pkg/project/service_target_aks_test.go +++ b/cli/azd/pkg/project/service_target_aks_test.go @@ -29,12 +29,16 @@ import ( "github.com/azure/azure-dev/cli/azd/test/mocks/mockenv" "github.com/azure/azure-dev/cli/azd/test/ostest" "github.com/benbjohnson/clock" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func Test_NewAksTarget(t *testing.T) { mockContext := mocks.NewMockContext(context.Background()) + err := setupMocksForAksTarget(mockContext) + require.NoError(t, err) + serviceConfig := createTestServiceConfig("./src/api", AksTarget, ServiceLanguageTypeScript) env := createEnv() @@ -75,6 +79,9 @@ func Test_Package_Deploy_HappyPath(t *testing.T) { env := createEnv() serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) + err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) + require.NoError(t, err) + err = setupK8sManifests(t, serviceConfig) require.NoError(t, err) @@ -125,24 +132,12 @@ func Test_Deploy_No_Cluster_Name(t *testing.T) { env.DotenvDelete(environment.AksClusterEnvVarName) serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) - scope := environment.NewTargetResource("SUB_ID", "RG_ID", "CLUSTER_NAME", 
string(infra.AzureResourceTypeManagedCluster)) - packageOutput := &ServicePackageResult{ - Build: &ServiceBuildResult{BuildOutputPath: "IMAGE_ID"}, - Details: &dockerPackageResult{ - ImageTag: "IMAGE_TAG", - }, - } - - deployTask := serviceTarget.Deploy(*mockContext.Context, serviceConfig, packageOutput, scope) - logProgress(deployTask) - - deployResult, err := deployTask.Await() + err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.Error(t, err) require.ErrorContains(t, err, "could not determine AKS cluster") - require.Nil(t, deployResult) } -func Test_Deploy_No_Admin_Credentials(t *testing.T) { +func Test_Deploy_No_Credentials(t *testing.T) { tempDir := t.TempDir() ostest.Chdir(t, tempDir) @@ -152,28 +147,16 @@ func Test_Deploy_No_Admin_Credentials(t *testing.T) { // Simulate list credentials fail. // For more secure clusters getting admin credentials can fail - err = setupListClusterAdminCredentialsMock(mockContext, http.StatusUnauthorized) + err = setupListClusterUserCredentialsMock(mockContext, http.StatusUnauthorized) require.NoError(t, err) serviceConfig := createTestServiceConfig(tempDir, AksTarget, ServiceLanguageTypeScript) env := createEnv() serviceTarget := createAksServiceTarget(mockContext, serviceConfig, env) - scope := environment.NewTargetResource("SUB_ID", "RG_ID", "CLUSTER_NAME", string(infra.AzureResourceTypeManagedCluster)) - packageOutput := &ServicePackageResult{ - Build: &ServiceBuildResult{BuildOutputPath: "IMAGE_ID"}, - Details: &dockerPackageResult{ - ImageTag: "IMAGE_TAG", - }, - } - - deployTask := serviceTarget.Deploy(*mockContext.Context, serviceConfig, packageOutput, scope) - logProgress(deployTask) - deployResult, err := deployTask.Await() - + err = simulateInitliaze(*mockContext.Context, serviceTarget, serviceConfig) require.Error(t, err) require.ErrorContains(t, err, "failed retrieving cluster admin credentials") - require.Nil(t, deployResult) } func setupK8sManifests(t *testing.T, serviceConfig *ServiceConfig) error { @@ -197,6 +180,11 @@ func setupMocksForAksTarget(mockContext *mocks.MockContext) error { return err } + err = setupListClusterUserCredentialsMock(mockContext, http.StatusOK) + if err != nil { + return err + } + setupMocksForAcr(mockContext) setupMocksForKubectl(mockContext) setupMocksForDocker(mockContext) @@ -234,6 +222,36 @@ func setupListClusterAdminCredentialsMock(mockContext *mocks.MockContext, status return nil } +func setupListClusterUserCredentialsMock(mockContext *mocks.MockContext, statusCode int) error { + kubeConfig := createTestCluster("cluster1", "user1") + kubeConfigBytes, err := yaml.Marshal(kubeConfig) + if err != nil { + return err + } + + // Get Admin cluster credentials + mockContext.HttpClient.When(func(request *http.Request) bool { + return request.Method == http.MethodPost && strings.Contains(request.URL.Path, "listClusterUserCredential") + }).RespondFn(func(request *http.Request) (*http.Response, error) { + creds := armcontainerservice.CredentialResults{ + Kubeconfigs: []*armcontainerservice.CredentialResult{ + { + Name: convert.RefOf("context"), + Value: kubeConfigBytes, + }, + }, + } + + if statusCode == http.StatusOK { + return mocks.CreateHttpResponseWithBody(request, statusCode, creds) + } else { + return mocks.CreateEmptyHttpResponse(request, statusCode) + } + }) + + return nil +} + func setupMocksForAcr(mockContext *mocks.MockContext) { mockazsdk.MockContainerRegistryList(mockContext, []*armcontainerregistry.Registry{ { @@ -484,6 +502,17 @@ func createAksServiceTarget( 
envManager := &mockenv.MockEnvManager{} envManager.On("Save", *mockContext.Context, env).Return(nil) + resourceManager := &MockResourceManager{} + targetResource := environment.NewTargetResource( + "SUBSCRIPTION_ID", + "RESOURCE_GROUP", + "CLUSTER_NAME", + string(infra.AzureResourceTypeManagedCluster), + ) + resourceManager. + On("GetTargetResource", *mockContext.Context, "SUBSCRIPTION_ID", serviceConfig). + Return(targetResource, nil) + managedClustersService := azcli.NewManagedClustersService(credentialProvider, mockContext.HttpClient) containerRegistryService := azcli.NewContainerRegistryService(credentialProvider, mockContext.HttpClient, dockerCli) containerHelper := NewContainerHelper(env, envManager, clock.NewMock(), containerRegistryService, dockerCli) @@ -491,12 +520,31 @@ func createAksServiceTarget( return NewAksTarget( env, envManager, + mockContext.Console, managedClustersService, + resourceManager, kubeCtl, containerHelper, ) } +func simulateInitliaze(ctx context.Context, serviceTarget ServiceTarget, serviceConfig *ServiceConfig) error { + if err := serviceTarget.Initialize(ctx, serviceConfig); err != nil { + return err + } + + err := serviceConfig.RaiseEvent(ctx, "predeploy", ServiceLifecycleEventArgs{ + Project: serviceConfig.Project, + Service: serviceConfig, + }) + + if err != nil { + return err + } + + return nil +} + func createTestCluster(clusterName, username string) *kubectl.KubeConfig { return &kubectl.KubeConfig{ ApiVersion: "v1", @@ -535,3 +583,46 @@ func logProgress[T comparable, P comparable](task *async.TaskWithProgress[T, P]) } }() } + +type MockResourceManager struct { + mock.Mock +} + +func (m *MockResourceManager) GetResourceGroupName( + ctx context.Context, + subscriptionId string, + projectConfig *ProjectConfig, +) (string, error) { + args := m.Called(ctx, subscriptionId, projectConfig) + return args.String(0), args.Error(1) +} + +func (m *MockResourceManager) GetServiceResources( + ctx context.Context, + subscriptionId string, + resourceGroupName string, + serviceConfig *ServiceConfig, +) ([]azcli.AzCliResource, error) { + args := m.Called(ctx, subscriptionId, resourceGroupName, serviceConfig) + return args.Get(0).([]azcli.AzCliResource), args.Error(1) +} + +func (m *MockResourceManager) GetServiceResource( + ctx context.Context, + subscriptionId string, + resourceGroupName string, + serviceConfig *ServiceConfig, + rerunCommand string, +) (azcli.AzCliResource, error) { + args := m.Called(ctx, subscriptionId, resourceGroupName, serviceConfig, rerunCommand) + return args.Get(0).(azcli.AzCliResource), args.Error(1) +} + +func (m *MockResourceManager) GetTargetResource( + ctx context.Context, + subscriptionId string, + serviceConfig *ServiceConfig, +) (*environment.TargetResource, error) { + args := m.Called(ctx, subscriptionId, serviceConfig) + return args.Get(0).(*environment.TargetResource), args.Error(1) +} diff --git a/cli/azd/pkg/tools/azcli/managed_clusters.go b/cli/azd/pkg/tools/azcli/managed_clusters.go index a401c045dd4..8507b21f56a 100644 --- a/cli/azd/pkg/tools/azcli/managed_clusters.go +++ b/cli/azd/pkg/tools/azcli/managed_clusters.go @@ -19,6 +19,13 @@ type ManagedClustersService interface { resourceGroupName string, resourceName string, ) (*armcontainerservice.CredentialResults, error) + // Gets the user credentials for the specified resource + GetUserCredentials( + ctx context.Context, + subscriptionId string, + resourceGroupName string, + resourceName string, + ) (*armcontainerservice.CredentialResults, error) } type managedClustersService 
struct { @@ -39,6 +46,26 @@ func NewManagedClustersService( } } +// Gets the user credentials for the specified resource +func (cs *managedClustersService) GetUserCredentials( + ctx context.Context, + subscriptionId string, + resourceGroupName string, + resourceName string, +) (*armcontainerservice.CredentialResults, error) { + client, err := cs.createManagedClusterClient(ctx, subscriptionId) + if err != nil { + return nil, err + } + + credResult, err := client.ListClusterUserCredentials(ctx, resourceGroupName, resourceName, nil) + if err != nil { + return nil, err + } + + return &credResult.CredentialResults, nil +} + // Gets the admin credentials for the specified resource func (cs *managedClustersService) GetAdminCredentials( ctx context.Context, diff --git a/cli/azd/pkg/tools/kubectl/kube_config.go b/cli/azd/pkg/tools/kubectl/kube_config.go index d5cd1a676ec..37d1deb2ef5 100644 --- a/cli/azd/pkg/tools/kubectl/kube_config.go +++ b/cli/azd/pkg/tools/kubectl/kube_config.go @@ -41,24 +41,24 @@ func ParseKubeConfig(ctx context.Context, raw []byte) (*KubeConfig, error) { } // Saves the KubeConfig to the kube configuration folder with the specified name -func (kcm *KubeConfigManager) SaveKubeConfig(ctx context.Context, configName string, config *KubeConfig) error { +func (kcm *KubeConfigManager) SaveKubeConfig(ctx context.Context, configName string, config *KubeConfig) (string, error) { kubeConfigRaw, err := yaml.Marshal(config) if err != nil { - return fmt.Errorf("failed marshalling KubeConfig to yaml: %w", err) + return "", fmt.Errorf("failed marshalling KubeConfig to yaml: %w", err) } // Create .kube config folder if it doesn't already exist if err := os.MkdirAll(kcm.configPath, osutil.PermissionDirectory); err != nil { - return fmt.Errorf("failed creating .kube config directory, %w", err) + return "", fmt.Errorf("failed creating .kube config directory, %w", err) } outFilePath := filepath.Join(kcm.configPath, configName) err = os.WriteFile(outFilePath, kubeConfigRaw, osutil.PermissionFile) if err != nil { - return fmt.Errorf("failed writing kube config file: %w", err) + return "", fmt.Errorf("failed writing kube config file: %w", err) } - return nil + return outFilePath, nil } // Deletes the KubeConfig with the specified name @@ -75,44 +75,47 @@ func (kcm *KubeConfigManager) DeleteKubeConfig(ctx context.Context, configName s // Merges the specified kube configs into the kube config // This powers the use of the kubectl config set of commands that allow developers to switch between different // k8s cluster contexts -func (kcm *KubeConfigManager) MergeConfigs(ctx context.Context, newConfigName string, path ...string) error { +func (kcm *KubeConfigManager) MergeConfigs(ctx context.Context, newConfigName string, path ...string) (string, error) { fullConfigPaths := []string{} for _, kubeConfigName := range path { fullConfigPaths = append(fullConfigPaths, filepath.Join(kcm.configPath, kubeConfigName)) } - envValues := map[string]string{ - "KUBECONFIG": strings.Join(fullConfigPaths, string(os.PathListSeparator)), - } - kcm.cli.SetEnv(envValues) + kubeConfig := strings.Join(fullConfigPaths, string(os.PathListSeparator)) + kcm.cli.SetKubeConfig(kubeConfig) + res, err := kcm.cli.ConfigView(ctx, true, true, nil) if err != nil { - return fmt.Errorf("kubectl config view failed: %w", err) + return "", fmt.Errorf("kubectl config view failed: %w", err) } kubeConfigRaw := []byte(res.Stdout) outFilePath := filepath.Join(kcm.configPath, newConfigName) err = os.WriteFile(outFilePath, kubeConfigRaw, 
osutil.PermissionFile) if err != nil { - return fmt.Errorf("failed writing new kube config: %w", err) + return "", fmt.Errorf("failed writing new kube config: %w", err) } - return nil + return outFilePath, nil } // Adds a new or updates an existing KubeConfig in the main kube config -func (kcm *KubeConfigManager) AddOrUpdateContext(ctx context.Context, contextName string, newKubeConfig *KubeConfig) error { - err := kcm.SaveKubeConfig(ctx, contextName, newKubeConfig) +func (kcm *KubeConfigManager) AddOrUpdateContext( + ctx context.Context, + contextName string, + newKubeConfig *KubeConfig, +) (string, error) { + _, err := kcm.SaveKubeConfig(ctx, contextName, newKubeConfig) if err != nil { - return fmt.Errorf("failed write new kube context file: %w", err) + return "", fmt.Errorf("failed write new kube context file: %w", err) } - err = kcm.MergeConfigs(ctx, "config", contextName) + configPath, err := kcm.MergeConfigs(ctx, "config", contextName) if err != nil { - return fmt.Errorf("failed merging KUBE configs: %w", err) + return "", fmt.Errorf("failed merging KUBE configs: %w", err) } - return nil + return configPath, nil } func getKubeConfigDir() (string, error) { diff --git a/cli/azd/pkg/tools/kubectl/kube_config_test.go b/cli/azd/pkg/tools/kubectl/kube_config_test.go index 6c407d03ed0..ed7b1c4b91a 100644 --- a/cli/azd/pkg/tools/kubectl/kube_config_test.go +++ b/cli/azd/pkg/tools/kubectl/kube_config_test.go @@ -3,6 +3,7 @@ package kubectl import ( "context" "fmt" + "path/filepath" "testing" "github.com/azure/azure-dev/cli/azd/pkg/exec" @@ -30,15 +31,25 @@ func Test_MergeKubeConfig(t *testing.T) { require.NoError(t, err) }() - err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config1", config1) + kubeConfigPath, err := kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config1", config1) require.NoError(t, err) - err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config2", config2) + require.NotEmpty(t, kubeConfigPath) + require.Contains(t, kubeConfigPath, filepath.Join(".kube", "config1")) + + kubeConfigPath, err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config2", config2) require.NoError(t, err) - err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config3", config3) + require.NotEmpty(t, kubeConfigPath) + require.Contains(t, kubeConfigPath, filepath.Join(".kube", "config2")) + + kubeConfigPath, err = kubeConfigManager.SaveKubeConfig(*mockContext.Context, "config3", config3) require.NoError(t, err) + require.NotEmpty(t, kubeConfigPath) + require.Contains(t, kubeConfigPath, filepath.Join(".kube", "config3")) - err = kubeConfigManager.MergeConfigs(*mockContext.Context, "config", "config1", "config2", "config3") + kubeConfigPath, err = kubeConfigManager.MergeConfigs(*mockContext.Context, "config", "config1", "config2", "config3") require.NoError(t, err) + require.NotEmpty(t, kubeConfigPath) + require.Contains(t, kubeConfigPath, filepath.Join(".kube", "config")) } func createTestCluster(clusterName, username string) *KubeConfig { diff --git a/cli/azd/pkg/tools/kubectl/kubectl.go b/cli/azd/pkg/tools/kubectl/kubectl.go index 5979e6b4e9b..a4d8e44ab09 100644 --- a/cli/azd/pkg/tools/kubectl/kubectl.go +++ b/cli/azd/pkg/tools/kubectl/kubectl.go @@ -21,6 +21,8 @@ type KubectlCli interface { Cwd(cwd string) // Sets the env vars available to the CLI SetEnv(env map[string]string) + // Sets the KUBECONFIG environment variable + SetKubeConfig(kubeConfig string) // Applies one or more files from the specified path Apply(ctx context.Context, path string, 
flags *KubeCliFlags) error // Applies manifests from the specified input @@ -84,6 +86,7 @@ type kubectlCli struct { func NewKubectl(commandRunner exec.CommandRunner) KubectlCli { return &kubectlCli{ commandRunner: commandRunner, + env: map[string]string{}, } } @@ -136,7 +139,14 @@ func (cli *kubectlCli) Name() string { // Sets the env vars available to the CLI func (cli *kubectlCli) SetEnv(envValues map[string]string) { - cli.env = envValues + for key, value := range envValues { + cli.env[key] = value + } +} + +// Sets the KUBECONFIG environment variable +func (cli *kubectlCli) SetKubeConfig(kubeConfig string) { + cli.env[KubeConfigEnvVarName] = kubeConfig } // Sets the current working directory @@ -175,8 +185,7 @@ func (cli *kubectlCli) ConfigView( } runArgs := exec.NewRunArgs("kubectl", args...). - WithCwd(kubeConfigDir). - WithEnv(environ(cli.env)) + WithCwd(kubeConfigDir) res, err := cli.executeCommandWithArgs(ctx, runArgs, flags) if err != nil { @@ -189,7 +198,6 @@ func (cli *kubectlCli) ConfigView( func (cli *kubectlCli) ApplyWithStdIn(ctx context.Context, input string, flags *KubeCliFlags) (*exec.RunResult, error) { runArgs := exec. NewRunArgs("kubectl", "apply", "-f", "-"). - WithEnv(environ(cli.env)). WithStdIn(strings.NewReader(input)) res, err := cli.executeCommandWithArgs(ctx, runArgs, flags) @@ -201,9 +209,7 @@ func (cli *kubectlCli) ApplyWithStdIn(ctx context.Context, input string, flags * } func (cli *kubectlCli) ApplyWithFile(ctx context.Context, filePath string, flags *KubeCliFlags) (*exec.RunResult, error) { - runArgs := exec. - NewRunArgs("kubectl", "apply", "-f", filePath). - WithEnv(environ(cli.env)) + runArgs := exec.NewRunArgs("kubectl", "apply", "-f", filePath) res, err := cli.executeCommandWithArgs(ctx, runArgs, flags) if err != nil { @@ -331,6 +337,8 @@ func (cli *kubectlCli) executeCommandWithArgs( args = args.WithCwd(cli.cwd) } + args = args.WithEnv(environ(cli.env)) + if flags != nil { if flags.DryRun != "" { args = args.AppendParams(fmt.Sprintf("--dry-run=%s", flags.DryRun)) diff --git a/cli/azd/pkg/tools/kubectl/models.go b/cli/azd/pkg/tools/kubectl/models.go index 129dbcb2dc6..fbac47c0f8b 100644 --- a/cli/azd/pkg/tools/kubectl/models.go +++ b/cli/azd/pkg/tools/kubectl/models.go @@ -11,6 +11,7 @@ const ( ResourceTypeDeployment ResourceType = "deployment" ResourceTypeIngress ResourceType = "ing" ResourceTypeService ResourceType = "svc" + KubeConfigEnvVarName string = "KUBECONFIG" ) type Resource struct { @@ -166,8 +167,9 @@ type KubeContext struct { } type KubeContextData struct { - Cluster string `yaml:"cluster"` - User string `yaml:"user"` + Cluster string `yaml:"cluster"` + Namespace string `yaml:"namespace"` + User string `yaml:"user"` } type KubeUser struct { diff --git a/cli/azd/pkg/tools/kubectl/util.go b/cli/azd/pkg/tools/kubectl/util.go index b7207642f07..ac2930f5f72 100644 --- a/cli/azd/pkg/tools/kubectl/util.go +++ b/cli/azd/pkg/tools/kubectl/util.go @@ -100,7 +100,6 @@ type ResourceFilterFn[T comparable] func(resource T) bool func WaitForResource[T comparable]( ctx context.Context, cli KubectlCli, - namespace string, resourceType ResourceType, resourceFilter ResourceFilterFn[T], readyStatusFilter ResourceFilterFn[T], @@ -111,9 +110,7 @@ func WaitForResource[T comparable]( ctx, retry.WithMaxDuration(time.Minute*10, retry.NewConstant(time.Second*10)), func(ctx context.Context) error { - result, err := GetResources[T](ctx, cli, resourceType, &KubeCliFlags{ - Namespace: namespace, - }) + result, err := GetResources[T](ctx, cli, resourceType, nil) if 
err != nil { return fmt.Errorf("failed waiting for resource, %w", err)
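
To summarize the behavior this patch introduces, below is a minimal, self-contained Go sketch of the KUBECONFIG precedence: an explicit KUBECONFIG in the azd environment wins and skips credential retrieval; otherwise azd fetches user credentials for the configured cluster and writes a merged kube config. The helper names (resolveKubeConfig, fetchUserKubeConfig) are illustrative only and not part of the azd codebase, and "AZURE_AKS_CLUSTER_NAME" is assumed to be the literal behind environment.AksClusterEnvVarName.

package main

import (
	"fmt"
	"os"
)

// fetchUserKubeConfig stands in for the patch's GetUserCredentials +
// KubeConfigManager.AddOrUpdateContext path, which writes the merged kube
// config and returns its location. (Hypothetical helper for illustration.)
func fetchUserKubeConfig(clusterName string) (string, error) {
	if clusterName == "" {
		return "", fmt.Errorf("could not determine AKS cluster")
	}
	return fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")), nil
}

// resolveKubeConfig mirrors the decision made in aksTarget.setK8sContext and
// ensureClusterContext: an existing KUBECONFIG takes precedence; otherwise
// user credentials are retrieved for the cluster named in the environment.
func resolveKubeConfig(env map[string]string) (path string, custom bool, err error) {
	if p := env["KUBECONFIG"]; p != "" {
		return p, true, nil
	}
	p, err := fetchUserKubeConfig(env["AZURE_AKS_CLUSTER_NAME"])
	return p, false, err
}

func main() {
	path, custom, err := resolveKubeConfig(map[string]string{
		"KUBECONFIG": "/tmp/overridden-kubeconfig",
	})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if custom {
		// Matches the warning the patch emits before "predeploy" when a
		// custom kube config is detected.
		fmt.Printf("Using KUBECONFIG @ %s\n", path)
	}
}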