Skip to content

Commit

Permalink
Allow setting ScaleDownMode and SpotMaxPrice for AzureManagedMachinePool
Browse files Browse the repository at this point in the history
  • Loading branch information
maciaszczykm committed Jul 13, 2023
1 parent 5362192 commit 25bc801
Show file tree
Hide file tree
Showing 9 changed files with 268 additions and 0 deletions.
13 changes: 13 additions & 0 deletions api/v1beta1/azuremanagedmachinepool_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ limitations under the License.
package v1beta1

import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
Expand Down Expand Up @@ -520,6 +521,18 @@ type AzureManagedMachinePoolSpec struct {
// +optional
ScaleSetPriority *string `json:"scaleSetPriority,omitempty"`

// ScaleDownMode affects the cluster autoscaler behavior. Default to Delete. Possible values include: 'Deallocate', 'Delete'
// +kubebuilder:validation:Enum=Deallocate;Delete
// +kubebuilder:default=Delete
// +optional
ScaleDownMode *string `json:"scaleDownMode,omitempty"`

// SpotMaxPrice defines max price to pay for spot instance. Possible values are any decimal value greater than zero or -1.
// If you set the max price to be -1, the VM won't be evicted based on price. The price for the VM will be the current price
// for spot or the price for a standard VM, whichever is less, as long as there's capacity and quota available.
// +optional
SpotMaxPrice *resource.Quantity `json:"spotMaxPrice,omitempty"`

// KubeletConfig specifies the kubelet configurations for nodes.
// Immutable.
// +optional
Expand Down
10 changes: 10 additions & 0 deletions api/v1beta1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions azure/converters/managedagentpool.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ func AgentPoolToManagedClusterAgentPoolProfile(pool containerservice.AgentPool)
EnableNodePublicIP: properties.EnableNodePublicIP,
NodePublicIPPrefixID: properties.NodePublicIPPrefixID,
ScaleSetPriority: properties.ScaleSetPriority,
ScaleDownMode: properties.ScaleDownMode,
SpotMaxPrice: properties.SpotMaxPrice,
Tags: properties.Tags,
KubeletDiskType: properties.KubeletDiskType,
LinuxOSConfig: properties.LinuxOSConfig,
Expand Down
2 changes: 2 additions & 0 deletions azure/scope/managedmachinepool.go
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,8 @@ func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane,
EnableNodePublicIP: managedMachinePool.Spec.EnableNodePublicIP,
NodePublicIPPrefixID: managedMachinePool.Spec.NodePublicIPPrefixID,
ScaleSetPriority: managedMachinePool.Spec.ScaleSetPriority,
ScaleDownMode: managedMachinePool.Spec.ScaleDownMode,
SpotMaxPrice: managedMachinePool.Spec.SpotMaxPrice,
AdditionalTags: managedMachinePool.Spec.AdditionalTags,
KubeletDiskType: managedMachinePool.Spec.KubeletDiskType,
LinuxOSConfig: managedMachinePool.Spec.LinuxOSConfig,
Expand Down
20 changes: 20 additions & 0 deletions azure/services/agentpools/spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2022-03-01/containerservice"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
Expand Down Expand Up @@ -129,6 +130,12 @@ type AgentPoolSpec struct {
// ScaleSetPriority specifies the ScaleSetPriority for the node pool. Allowed values are 'Spot' and 'Regular'
ScaleSetPriority *string `json:"scaleSetPriority,omitempty"`

// ScaleDownMode affects the cluster autoscaler behavior. Allowed values are 'Deallocate' and 'Delete'
ScaleDownMode *string `json:"scaleDownMode,omitempty"`

// SpotMaxPrice defines max price to pay for spot instance. Allowed values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price.
SpotMaxPrice *resource.Quantity `json:"spotMaxPrice,omitempty"`

// KubeletConfig specifies the kubelet configurations for nodes.
KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"`

Expand Down Expand Up @@ -193,6 +200,8 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (p
NodeLabels: existingPool.NodeLabels,
NodeTaints: existingPool.NodeTaints,
Tags: existingPool.Tags,
ScaleDownMode: existingPool.ScaleDownMode,
SpotMaxPrice: existingPool.SpotMaxPrice,
KubeletConfig: existingPool.KubeletConfig,
},
}
Expand All @@ -207,13 +216,18 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (p
MaxCount: s.MaxCount,
NodeLabels: s.NodeLabels,
NodeTaints: &s.NodeTaints,
ScaleDownMode: containerservice.ScaleDownMode(pointer.StringDeref(s.ScaleDownMode, "")),
Tags: converters.TagsToMap(s.AdditionalTags),
},
}
if len(*normalizedProfile.NodeTaints) == 0 {
normalizedProfile.NodeTaints = nil
}

if s.SpotMaxPrice != nil {
normalizedProfile.SpotMaxPrice = pointer.Float64(s.SpotMaxPrice.AsApproximateFloat64())
}

if s.KubeletConfig != nil {
normalizedProfile.KubeletConfig = &containerservice.KubeletConfig{
CPUManagerPolicy: s.KubeletConfig.CPUManagerPolicy,
Expand Down Expand Up @@ -267,6 +281,10 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (p
if s.SKU != "" {
sku = &s.SKU
}
var spotMaxPrice *float64
if s.SpotMaxPrice != nil {
spotMaxPrice = pointer.Float64(s.SpotMaxPrice.AsApproximateFloat64())
}
tags := converters.TagsToMap(s.AdditionalTags)
if tags == nil {
// Make sure we send a non-nil, empty map if AdditionalTags are nil as this tells AKS to delete any existing tags.
Expand Down Expand Up @@ -354,6 +372,8 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (p
OsDiskType: containerservice.OSDiskType(pointer.StringDeref(s.OsDiskType, "")),
OsType: containerservice.OSType(pointer.StringDeref(s.OSType, "")),
ScaleSetPriority: containerservice.ScaleSetPriority(pointer.StringDeref(s.ScaleSetPriority, "")),
ScaleDownMode: containerservice.ScaleDownMode(pointer.StringDeref(s.ScaleDownMode, "")),
SpotMaxPrice: spotMaxPrice,
Type: containerservice.AgentPoolTypeVirtualMachineScaleSets,
VMSize: sku,
VnetSubnetID: vnetSubnetID,
Expand Down
53 changes: 53 additions & 0 deletions azure/services/agentpools/spec_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
Expand Down Expand Up @@ -76,6 +77,13 @@ func withAutoscaling(enabled bool) func(*AgentPoolSpec) {
}
}

// withSpotMaxPrice returns an AgentPoolSpec mutator that sets SpotMaxPrice to
// the quantity parsed from the given string. It panics (via MustParse) if the
// string is not a valid resource quantity, which is acceptable in test code.
func withSpotMaxPrice(spotMaxPrice string) func(*AgentPoolSpec) {
	parsed := resource.MustParse(spotMaxPrice)
	return func(spec *AgentPoolSpec) {
		spec.SpotMaxPrice = &parsed
	}
}

func sdkFakeAgentPool(changes ...func(*containerservice.AgentPool)) containerservice.AgentPool {
pool := containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
Expand Down Expand Up @@ -114,6 +122,18 @@ func sdkWithAutoscaling(enableAutoscaling bool) func(*containerservice.AgentPool
}
}

// sdkWithScaleDownMode returns a mutator that sets ScaleDownMode on the SDK
// agent pool's profile properties (used to build fake SDK fixtures in tests).
func sdkWithScaleDownMode(scaleDownMode containerservice.ScaleDownMode) func(*containerservice.AgentPool) {
	return func(p *containerservice.AgentPool) {
		p.ManagedClusterAgentPoolProfileProperties.ScaleDownMode = scaleDownMode
	}
}

// sdkWithSpotMaxPrice returns a mutator that sets SpotMaxPrice on the SDK
// agent pool's profile properties (used to build fake SDK fixtures in tests).
func sdkWithSpotMaxPrice(spotMaxPrice float64) func(*containerservice.AgentPool) {
	price := spotMaxPrice
	return func(p *containerservice.AgentPool) {
		p.ManagedClusterAgentPoolProfileProperties.SpotMaxPrice = &price
	}
}

func sdkWithCount(count int32) func(*containerservice.AgentPool) {
return func(pool *containerservice.AgentPool) {
pool.ManagedClusterAgentPoolProfileProperties.Count = pointer.Int32(count)
Expand Down Expand Up @@ -215,6 +235,39 @@ func TestParameters(t *testing.T) {
expected: sdkFakeAgentPool(),
expectedError: nil,
},
{
name: "parameters with an existing agent pool and update needed on scale down mode",
spec: fakeAgentPool(),
existing: sdkFakeAgentPool(
sdkWithScaleDownMode(containerservice.ScaleDownModeDeallocate),
sdkWithProvisioningState("Succeeded"),
),
expected: sdkFakeAgentPool(),
expectedError: nil,
},
{
name: "parameters with an existing agent pool and update needed on spot max price",
spec: fakeAgentPool(),
existing: sdkFakeAgentPool(
sdkWithSpotMaxPrice(123.456),
sdkWithProvisioningState("Succeeded"),
),
expected: sdkFakeAgentPool(),
expectedError: nil,
},
{
name: "parameters with an existing agent pool and update needed on spot max price",
spec: fakeAgentPool(
withSpotMaxPrice("789.12345"),
),
existing: sdkFakeAgentPool(
sdkWithProvisioningState("Succeeded"),
),
expected: sdkFakeAgentPool(
sdkWithSpotMaxPrice(789.12345),
),
expectedError: nil,
},
{
name: "parameters with an existing agent pool and update needed on max count",
spec: fakeAgentPool(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -495,6 +495,14 @@ spec:
items:
type: string
type: array
scaleDownMode:
default: Delete
description: 'ScaleDownMode affects the cluster autoscaler behavior.
Default to Delete. Possible values include: ''Deallocate'', ''Delete'''
enum:
- Deallocate
- Delete
type: string
scaleSetPriority:
description: 'ScaleSetPriority specifies the ScaleSetPriority value.
Default to Regular. Possible values include: ''Regular'', ''Spot''
Expand All @@ -519,6 +527,18 @@ spec:
sku:
description: SKU is the size of the VMs in the node pool. Immutable.
type: string
spotMaxPrice:
anyOf:
- type: integer
- type: string
description: SpotMaxPrice defines max price to pay for spot instance.
Possible values are any decimal value greater than zero or -1. If
you set the max price to be -1, the VM won't be evicted based on
price. The price for the VM will be the current price for spot or
the price for a standard VM, whichever is less, as long as there's
capacity and quota available.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
subnetName:
description: SubnetName specifies the Subnet where the MachinePool
will be placed Immutable.
Expand Down
138 changes: 138 additions & 0 deletions test/e2e/aks_spot.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
//go:build e2e
// +build e2e

/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"k8s.io/apimachinery/pkg/api/resource"

"github.com/Azure/go-autorest/autorest/azure/auth"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// AKSSpotSpecInput groups the parameters for AKSSpotSpec.
type AKSSpotSpecInput struct {
	// Cluster is the CAPI Cluster whose ControlPlaneRef points at the
	// AzureManagedControlPlane the spot node pool is attached to.
	Cluster *clusterv1.Cluster
	// KubernetesVersion is set as the Version on the MachinePool template.
	KubernetesVersion string
	// WaitIntervals is splatted into Gomega Eventually calls
	// (timeout/polling configuration).
	WaitIntervals []interface{}
}

func AKSSpotSpec(ctx context.Context, inputGetter func() AKSSpotSpecInput) {
input := inputGetter()

settings, err := auth.GetSettingsFromEnvironment()
Expect(err).NotTo(HaveOccurred())
subscriptionID := settings.GetSubscriptionID()

Check failure on line 52 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

subscriptionID declared and not used

Check failure on line 52 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

subscriptionID declared and not used
auth, err := azureutil.GetAuthorizer(settings)

Check failure on line 53 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

auth declared and not used

Check failure on line 53 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

auth declared and not used
Expect(err).NotTo(HaveOccurred())

mgmtClient := bootstrapClusterProxy.GetClient()
Expect(mgmtClient).NotTo(BeNil())

infraControlPlane := &infrav1.AzureManagedControlPlane{}
err = mgmtClient.Get(ctx, client.ObjectKey{Namespace: input.Cluster.Spec.ControlPlaneRef.Namespace, Name: input.Cluster.Spec.ControlPlaneRef.Name}, infraControlPlane)
Expect(err).NotTo(HaveOccurred())

resourceGroupName := infraControlPlane.Spec.ResourceGroupName

Check failure on line 63 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

resourceGroupName declared and not used) (typecheck)

Check failure on line 63 in test/e2e/aks_spot.go

View workflow job for this annotation

GitHub Actions / coverage

resourceGroupName declared and not used (typecheck)
scaling := infrav1.ManagedMachinePoolScaling{
MaxSize: pointer.Int32(9),
MinSize: pointer.Int32(0),
}
spotMaxPrice := resource.MustParse("123.456789")

By("Creating node pool")
infraMachinePool := &infrav1.AzureManagedMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "poolspot",
Namespace: input.Cluster.Namespace,
},
Spec: infrav1.AzureManagedMachinePoolSpec{
Mode: "User",
SKU: "Standard_D2s_v3",
ScaleSetPriority: pointer.String("Spot"),
Scaling: &scaling,
SpotMaxPrice: &spotMaxPrice,
ScaleDownMode: pointer.String("Deallocate"),
},
}
err = mgmtClient.Create(ctx, infraMachinePool)
Expect(err).NotTo(HaveOccurred())

machinePool := &expv1.MachinePool{
ObjectMeta: metav1.ObjectMeta{
Namespace: infraMachinePool.Namespace,
Name: infraMachinePool.Name,
},
Spec: expv1.MachinePoolSpec{
ClusterName: input.Cluster.Name,
Replicas: pointer.Int32(0),
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.String(""),
},
ClusterName: input.Cluster.Name,
InfrastructureRef: corev1.ObjectReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: "AzureManagedMachinePool",
Name: infraMachinePool.Name,
},
Version: pointer.String(input.KubernetesVersion),
},
},
},
}
err = mgmtClient.Create(ctx, machinePool)
Expect(err).NotTo(HaveOccurred())

defer func() {
By("Deleting the node pool")
err := mgmtClient.Delete(ctx, machinePool)
Expect(err).NotTo(HaveOccurred())

Eventually(func(g Gomega) {
err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), &expv1.MachinePool{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}, input.WaitIntervals...).Should(Succeed(), "Deleted MachinePool %s/%s still exists", machinePool.Namespace, machinePool.Name)

Eventually(func(g Gomega) {
err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(infraMachinePool), &infrav1.AzureManagedMachinePool{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}, input.WaitIntervals...).Should(Succeed(), "Deleted AzureManagedMachinePool %s/%s still exists", infraMachinePool.Namespace, infraMachinePool.Name)
}()

By("Verifying the AzureManagedMachinePool becomes ready")
Eventually(func(g Gomega) {
infraMachinePool := &infrav1.AzureManagedMachinePool{}
err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), infraMachinePool)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(conditions.IsTrue(infraMachinePool, infrav1.AgentPoolsReadyCondition)).To(BeTrue())
}, input.WaitIntervals...).Should(Succeed())
}
Loading

0 comments on commit 25bc801

Please sign in to comment.