From e3d0f13c88dbc7c193fd3f9c77fdba0c25242bd0 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Fri, 7 Jun 2024 00:27:07 +0200 Subject: [PATCH] Add control plane failure domains feature for Nutanix provider (#8192) * Add failure domains support for Nutanix Provider - change Nutanix Datacenter CRD - change templates - generate manifests - add unittest * Fix lint error * Regenerate deepcopy files * Fix PR comments - add validation - fix template - add unittest for validation * Add validations and unit tests * Allow updating failure domains for existing clusters --- ...mazonaws.com_nutanixdatacenterconfigs.yaml | 64 ++ config/manifest/eksa-components.yaml | 64 ++ .../v1alpha1/nutanixdatacenterconfig_test.go | 16 + .../v1alpha1/nutanixdatacenterconfig_types.go | 58 ++ ...tacenterconfig-invalid-failuredomains.yaml | 30 + ...datacenterconfig-valid-failuredomains.yaml | 30 + pkg/api/v1alpha1/zz_generated.deepcopy.go | 30 + pkg/providers/nutanix/config/cp-template.yaml | 26 + pkg/providers/nutanix/provider.go | 1 - pkg/providers/nutanix/provider_test.go | 18 +- pkg/providers/nutanix/template.go | 31 + pkg/providers/nutanix/template_test.go | 28 + .../cluster_nutanix_failure_domains.yaml | 87 +++ ...datacenterConfig_with_failure_domains.yaml | 27 + ..._with_failure_domains_invalid_cluster.yaml | 27 + ...fig_with_failure_domains_invalid_name.yaml | 27 + ...g_with_failure_domains_invalid_subnet.yaml | 27 + .../expected_results_failure_domains.yaml | 631 ++++++++++++++++++ pkg/providers/nutanix/validator.go | 32 + pkg/providers/nutanix/validator_test.go | 138 ++++ 20 files changed, 1389 insertions(+), 3 deletions(-) create mode 100644 pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml create mode 100644 pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml create mode 100644 pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml create mode 100644 pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml index 49d4c9af3a11..ddcf876d2e87 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml @@ -56,6 +56,70 @@ spec: endpoint: description: Endpoint is the Endpoint of Nutanix Prism Central type: string + failureDomains: + description: FailureDomains is the optional list of failure domains + for the Nutanix Datacenter. + items: + description: NutanixDatacenterFailureDomain defines the failure + domain for the Nutanix Datacenter. + properties: + cluster: + description: Cluster is the Prism Element cluster name or uuid + that is connected to the Prism Central. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. 
+ type: string + required: + - type + type: object + name: + description: Name is the unique name of the failure domain. + Name must be between 1 and 64 characters long. It must consist + of only lower case alphanumeric characters and hyphens (-). + It must start and end with an alphanumeric character. + maxLength: 64 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + subnets: + description: Subnets holds the list of subnets identifiers cluster's + network subnets. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix Prism resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + type: array + required: + - name + type: object + type: array insecure: description: Insecure is the optional flag to skip TLS verification. Nutanix Prism Central installation by default ships with a self-signed diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index 179d992644b2..7077c75c929d 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -5507,6 +5507,70 @@ spec: endpoint: description: Endpoint is the Endpoint of Nutanix Prism Central type: string + failureDomains: + description: FailureDomains is the optional list of failure domains + for the Nutanix Datacenter. + items: + description: NutanixDatacenterFailureDomain defines the failure + domain for the Nutanix Datacenter. + properties: + cluster: + description: Cluster is the Prism Element cluster name or uuid + that is connected to the Prism Central. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + name: + description: Name is the unique name of the failure domain. + Name must be between 1 and 64 characters long. It must consist + of only lower case alphanumeric characters and hyphens (-). + It must start and end with an alphanumeric character. + maxLength: 64 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + subnets: + description: Subnets holds the list of subnets identifiers cluster's + network subnets. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix Prism resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + type: array + required: + - name + type: object + type: array insecure: description: Insecure is the optional flag to skip TLS verification. 
Nutanix Prism Central installation by default ships with a self-signed diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go index 1eaf40e16525..1055bb8e05c6 100644 --- a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go +++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go @@ -175,6 +175,22 @@ func TestGetNutanixDatacenterConfigValidConfig(t *testing.T) { assert.Contains(t, err.Error(), "NutanixDatacenterConfig credentialRef name is not set or is empty") }, }, + { + name: "datacenterconfig-valid-failure-domains", + fileName: "testdata/nutanix/datacenterconfig-valid-failuredomains.yaml", + assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) { + assert.NoError(t, dcConf.Validate()) + }, + }, + { + name: "datecenterconfig-invalid-failure-domains", + fileName: "testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml", + assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) { + err := dcConf.Validate() + assert.Error(t, err) + assert.Contains(t, err.Error(), "NutanixDatacenterConfig.Spec.FailureDomains.Subnets: missing subnet UUID: default/eksa-unit-test") + }, + }, } for _, test := range tests { diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go index 4fb6fc522d75..2ab1b77467f3 100644 --- a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go +++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go @@ -43,6 +43,31 @@ type NutanixDatacenterConfigSpec struct { // for the Nutanix Prism Central. The namespace for the secret is assumed to be a constant i.e. eksa-system. // +optional CredentialRef *Ref `json:"credentialRef,omitempty"` + + // FailureDomains is the optional list of failure domains for the Nutanix Datacenter. + // +optional + FailureDomains []NutanixDatacenterFailureDomain `json:"failureDomains,omitempty"` +} + +// NutanixDatacenterFailureDomain defines the failure domain for the Nutanix Datacenter. +type NutanixDatacenterFailureDomain struct { + // Name is the unique name of the failure domain. + // Name must be between 1 and 64 characters long. + // It must consist of only lower case alphanumeric characters and hyphens (-). + // It must start and end with an alphanumeric character. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + Name string `json:"name"` + + // Cluster is the Prism Element cluster name or uuid that is connected to the Prism Central. + // +kubebuilder:validation:Required + Cluster NutanixResourceIdentifier `json:"cluster,omitempty"` + + // Subnets holds the list of subnets identifiers cluster's network subnets. + // +kubebuilder:validation:Required + Subnets []NutanixResourceIdentifier `json:"subnets,omitempty"` } // NutanixDatacenterConfigStatus defines the observed state of NutanixDatacenterConfig. 
@@ -140,9 +165,42 @@ func (in *NutanixDatacenterConfig) Validate() error { } } + if in.Spec.FailureDomains != nil && len(in.Spec.FailureDomains) != 0 { + dccName := in.Namespace + "/" + in.Name + validateClusterResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Cluster", "cluster", dccName) + validateSubnetResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Subnets", "subnet", dccName) + for _, fd := range in.Spec.FailureDomains { + if err := validateClusterResourceIdentifier(&fd.Cluster); err != nil { + return err + } + + for _, subnet := range fd.Subnets { + if err := validateSubnetResourceIdentifier(&subnet); err != nil { + return err + } + } + } + } + return nil } +func createValidateNutanixResourceFunc(msgPrefix, entityName, mfstName string) func(*NutanixResourceIdentifier) error { + return func(ntnxRId *NutanixResourceIdentifier) error { + if ntnxRId.Type != NutanixIdentifierName && ntnxRId.Type != NutanixIdentifierUUID { + return fmt.Errorf("%s: invalid identifier type for %s: %s", msgPrefix, entityName, ntnxRId.Type) + } + + if ntnxRId.Type == NutanixIdentifierName && (ntnxRId.Name == nil || *ntnxRId.Name == "") { + return fmt.Errorf("%s: missing %s name: %s", msgPrefix, entityName, mfstName) + } else if ntnxRId.Type == NutanixIdentifierUUID && (ntnxRId.UUID == nil || *ntnxRId.UUID == "") { + return fmt.Errorf("%s: missing %s UUID: %s", msgPrefix, entityName, mfstName) + } + + return nil + } +} + // SetDefaults sets default values for the NutanixDatacenterConfig object. func (in *NutanixDatacenterConfig) SetDefaults() { if in.Spec.CredentialRef == nil { diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml new file mode 100644 index 000000000000..b25f74bc958c --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml new file mode 100644 index 000000000000..02f806ff343e --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + 
type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index 14f6a15e57f1..b595f09e020a 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -2012,6 +2012,13 @@ func (in *NutanixDatacenterConfigSpec) DeepCopyInto(out *NutanixDatacenterConfig *out = new(Ref) **out = **in } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixDatacenterFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterConfigSpec. @@ -2039,6 +2046,29 @@ func (in *NutanixDatacenterConfigStatus) DeepCopy() *NutanixDatacenterConfigStat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixDatacenterFailureDomain) DeepCopyInto(out *NutanixDatacenterFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterFailureDomain. +func (in *NutanixDatacenterFailureDomain) DeepCopy() *NutanixDatacenterFailureDomain { + if in == nil { + return nil + } + out := new(NutanixDatacenterFailureDomain) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NutanixMachineConfig) DeepCopyInto(out *NutanixMachineConfig) { *out = *in diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index 9d147c482598..82acdc53dc66 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -5,7 +5,33 @@ metadata: name: "{{.clusterName}}" namespace: "{{.eksaSystemNamespace}}" spec: +{{- if .failureDomains }} + failureDomains: + {{- range $index, $value := .failureDomains}} + - name: "{{ $value.Name }}" + cluster: + {{- if (eq $value.Cluster.Type "uuid") }} + type: "uuid" + uuid: "{{ $value.Cluster.UUID }}" + {{- else if (eq $value.Cluster.Type "name") }} + type: "name" + name: "{{ $value.Cluster.Name }}" + {{- end}} + subnets: + {{- range $value.Subnets}} + {{- if (eq .Type "uuid") }} + - type: "uuid" + uuid: "{{ .UUID }}" + {{- else if (eq .Type "name") }} + - type: "name" + name: "{{ .Name }}" + {{- end}} + {{- end}} + controlPlane: true + {{- end }} +{{- else }} failureDomains: [] +{{- end}} prismCentral: {{- if .nutanixAdditionalTrustBundle }} additionalTrustBundle: diff --git a/pkg/providers/nutanix/provider.go b/pkg/providers/nutanix/provider.go index e5a7de728682..bc330f656a4c 100644 --- a/pkg/providers/nutanix/provider.go +++ b/pkg/providers/nutanix/provider.go @@ -419,7 +419,6 @@ func needsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec, oldNmc, newNmc *v1alph if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number { return true } - return AnyImmutableFieldChanged(oldNmc, newNmc) } diff --git a/pkg/providers/nutanix/provider_test.go b/pkg/providers/nutanix/provider_test.go index 119d683da6fa..5dd4bab83bbe 100644 --- a/pkg/providers/nutanix/provider_test.go +++ b/pkg/providers/nutanix/provider_test.go @@ -530,7 +530,20 @@ func TestNutanixProviderSetupAndValidateDeleteCluster(t *testing.T) { } func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { - provider := testDefaultNutanixProvider(t) + ctrl := gomock.NewController(t) + executable := mockexecutables.NewMockExecutable(ctrl) + executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).AnyTimes() + executable.EXPECT().Execute(gomock.Any(), "get", + "--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test").Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil).AnyTimes() + kubectl := executables.NewKubectl(executable) + mockClient := mocknutanix.NewMockClient(ctrl) + mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) + mockTransport := mocknutanix.NewMockRoundTripper(ctrl) + mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() + mockHTTPClient := &http.Client{Transport: mockTransport} + mockWriter := filewritermocks.NewMockFileWriter(ctrl) + provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) + tests := []struct { name string clusterConfFile string @@ -558,7 +571,8 @@ func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { for _, tt := range tests { clusterSpec := test.NewFullClusterSpec(t, tt.clusterConfFile) - err := provider.SetupAndValidateUpgradeCluster(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec, clusterSpec) + cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} + err := 
provider.SetupAndValidateUpgradeCluster(context.Background(), cluster, clusterSpec, clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index f1cb5e2493c7..24e93dcc03b2 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -7,6 +7,7 @@ import ( "sigs.k8s.io/yaml" + capxv1beta1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -176,6 +177,8 @@ func buildTemplateMapCP( return nil, err } + failureDomains := generateNutanixFailureDomains(datacenterSpec.FailureDomains) + values := map[string]interface{}{ "auditPolicy": auditPolicy, "apiServerExtraArgs": apiServerExtraArgs.ToPartialYaml(), @@ -188,6 +191,7 @@ func buildTemplateMapCP( "controlPlaneTaints": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints, "eksaSystemNamespace": constants.EksaSystemNamespace, "format": format, + "failureDomains": failureDomains, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, "serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, @@ -460,3 +464,30 @@ func generateNoProxyList(clusterSpec *cluster.Spec) []string { return noProxyList } + +func generateNutanixFailureDomains(eksNutanixFailureDomains []v1alpha1.NutanixDatacenterFailureDomain) []capxv1beta1.NutanixFailureDomain { + var failureDomains []capxv1beta1.NutanixFailureDomain + for _, fd := range eksNutanixFailureDomains { + + subnets := []capxv1beta1.NutanixResourceIdentifier{} + for _, subnet := range fd.Subnets { + subnets = append(subnets, capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(subnet.Type), + Name: subnet.Name, + UUID: subnet.UUID, + }) + } + + failureDomains = append(failureDomains, capxv1beta1.NutanixFailureDomain{ + Name: fd.Name, + Cluster: capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(fd.Cluster.Type), + Name: fd.Cluster.Name, + UUID: fd.Cluster.UUID, + }, + Subnets: subnets, + ControlPlane: true, + }) + } + return failureDomains +} diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index d931c0bf6040..037081059f57 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -686,6 +686,34 @@ func TestTemplateBuilderEtcdEncryptionKubernetes129(t *testing.T) { } } +func TestTemplateBuilderFailureDomains(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_nutanix_failure_domains.yaml", + Output: "testdata/expected_results_failure_domains.yaml", + }, + } { + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + + bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, + map[string]anywherev1.NutanixMachineConfigSpec{}, creds, time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + assert.NoError(t, err) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} + func 
minimalNutanixConfigSpec(t *testing.T) (*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml new file mode 100644 index 000000000000..c5750e15cfb5 --- /dev/null +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml @@ -0,0 +1,87 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: test + count: 1 + endpoint: + host: test + machineGroupRef: + name: test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + kind: NutanixMachineConfig + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml new file mode 100644 index 000000000000..25f95fa4cf24 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml new file mode 100644 index 000000000000..91a7f99954f3 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: 
anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-00005993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml new file mode 100644 index 000000000000..c4dda7d7650f --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "FIZZBUZZ!!!!" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml new file mode 100644 index 000000000000..a35a86b484b2 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-000062d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml new file mode 100644 index 000000000000..b3ff855aa819 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml @@ -0,0 +1,631 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster-1" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-test" + kind: Secret + controlPlaneEndpoint: + host: "test" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster 
+metadata: + labels: + cluster.x-k8s.io/cluster-name: "test" + name: "test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "test" + namespace: "eksa-system" +spec: + replicas: 1 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": false, + 
"credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: "test" + resources: + - kind: ConfigMap + name: test-nutanix-ccm + - 
kind: Secret + name: test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go index 2bce2a5c543c..d5cf8ad732ff 100644 --- a/pkg/providers/nutanix/validator.go +++ b/pkg/providers/nutanix/validator.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "regexp" "strconv" "strings" @@ -126,6 +127,37 @@ func (v *Validator) ValidateDatacenterConfig(ctx context.Context, client Client, return err } + if err := v.validateFailureDomains(ctx, client, config); err != nil { + return err + } + + return nil +} + +func (v *Validator) validateFailureDomains(ctx context.Context, client Client, config *anywherev1.NutanixDatacenterConfig) error { + regexName, err := regexp.Compile("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + if err != nil { + return err + } + + for _, fd := range config.Spec.FailureDomains { + if res := regexName.MatchString(fd.Name); !res { + errorStr := `failure domain name should contains only small letters, digits, and hyphens. + It should start with small letter or digit` + return fmt.Errorf(errorStr) + } + + if err := v.validateClusterConfig(ctx, client, fd.Cluster); err != nil { + return err + } + + for _, subnet := range fd.Subnets { + if err := v.validateSubnetConfig(ctx, client, subnet); err != nil { + return err + } + } + } + return nil } diff --git a/pkg/providers/nutanix/validator_test.go b/pkg/providers/nutanix/validator_test.go index 802a365b3654..01fdb204aa17 100644 --- a/pkg/providers/nutanix/validator_test.go +++ b/pkg/providers/nutanix/validator_test.go @@ -5,7 +5,9 @@ import ( _ "embed" "encoding/json" "errors" + "fmt" "net/http" + "strings" "testing" "github.com/golang/mock/gomock" @@ -45,6 +47,18 @@ var nutanixDatacenterConfigSpecWithInvalidCredentialRefKind string //go:embed testdata/datacenterConfig_empty_credentialRef_name.yaml var nutanixDatacenterConfigSpecWithEmptyCredentialRefName string +//go:embed testdata/datacenterConfig_with_failure_domains.yaml +var nutanixDatacenterConfigSpecWithFailureDomain string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_name.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidName string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet string + func fakeClusterList() *v3.ClusterListIntentResponse { return &v3.ClusterListIntentResponse{ Entities: []*v3.ClusterIntentResponse{ @@ -82,6 +96,96 @@ func fakeSubnetList() *v3.SubnetListIntentResponse { } } +func fakeClusterListForDCTest(filter *string) (*v3.ClusterListIntentResponse, error) { + data := &v3.ClusterListIntentResponse{ + Entities: []*v3.ClusterIntentResponse{ + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdb"), + }, + Spec: &v3.Cluster{ + Name: utils.StringPtr("prism-cluster"), + }, + Status: &v3.ClusterDefStatus{ + Resources: 
&v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("4d69ca7d-022f-49d1-a454-74535993bda4"), + }, + Spec: &v3.Cluster{ + Name: utils.StringPtr("prism-cluster-1"), + }, + Status: &v3.ClusterDefStatus{ + Resources: &v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + }, + } + + result := &v3.ClusterListIntentResponse{ + Entities: []*v3.ClusterIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := strings.Replace(*filter, "name==", "", -1) + for _, cluster := range data.Entities { + if str == *cluster.Spec.Name { + result.Entities = append(result.Entities, cluster) + } + } + } + + return result, nil +} + +func fakeSubnetListForDCTest(filter *string) (*v3.SubnetListIntentResponse, error) { + data := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{ + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("b15f6966-bfc7-4d1e-8575-224096fc1cdb"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet"), + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("2d166190-7759-4dc6-b835-923262d6b497"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet-1"), + }, + }, + }, + } + + result := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := strings.Replace(*filter, "name==", "", -1) + for _, subnet := range data.Entities { + if str == *subnet.Spec.Name { + result.Entities = append(result.Entities, subnet) + } + } + } + + return result, nil +} + func fakeImageList() *v3.ImageListIntentResponse { return &v3.ImageListIntentResponse{ Entities: []*v3.ImageIntentResponse{ @@ -596,11 +700,45 @@ func TestNutanixValidatorValidateDatacenterConfig(t *testing.T) { dcConfFile: nutanixDatacenterConfigSpecWithEmptyCredentialRefName, expectErr: true, }, + { + name: "valid failure domains", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomain, + expectErr: false, + }, + { + name: "failure domain with invalid name", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidName, + expectErr: true, + }, + { + name: "failure domain with invalid cluster", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster, + expectErr: true, + }, + { + name: "failure domains with invalid subnet", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet, + expectErr: true, + }, } ctrl := gomock.NewController(t) mockClient := mocknutanix.NewMockClient(ctrl) mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() + mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.ClusterListIntentResponse, error) { + return fakeClusterListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) { + return fakeSubnetListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Eq("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Not("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, fmt.Errorf("")).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), 
gomock.Eq("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), gomock.Not("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, fmt.Errorf("")).AnyTimes() mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()