
chore: support kruise daemonset/statefulset #1474

Closed

Changes from all commits (24 commits):
df20ca0  support kruise daemonset (daimaxiaxie, Jul 28, 2024)
6a05019  support kruise statefulset (daimaxiaxie, Jul 28, 2024)
c0cb387  add kruise daemonset controller (daimaxiaxie, Aug 3, 2024)
a2f8ecd  replace daemonset in kruise daemonset controller (daimaxiaxie, Aug 3, 2024)
d3ed25a  complete kruise daemonset controller (daimaxiaxie, Aug 4, 2024)
58f263e  format import (daimaxiaxie, Aug 9, 2024)
129c084  fix rebase (daimaxiaxie, Aug 10, 2024)
cce4c91  fix version (daimaxiaxie, Aug 12, 2024)
32fdb33  add kruise daemonset crd in unit test (daimaxiaxie, Aug 12, 2024)
84b574f  fix should update daemonsetCache with the newest created kruise daemo… (daimaxiaxie, Aug 13, 2024)
07f8b4a  more kruise daemonset controller unit test (daimaxiaxie, Aug 16, 2024)
b7aae63  added termination unit test for IsOwnedByDaemonSet (daimaxiaxie, Aug 18, 2024)
2ff3b83  fix termination unit test (daimaxiaxie, Aug 18, 2024)
1d565af  add scheduling unit test for IsOwnedByStatefulSet (daimaxiaxie, Aug 18, 2024)
4dec000  add Kruise StatefulSet crd (daimaxiaxie, Aug 18, 2024)
c83b9aa  format import (daimaxiaxie, Aug 18, 2024)
66857f4  fix import kruise crds in scheduling suite test (daimaxiaxie, Aug 18, 2024)
601923e  fix kruise statefulset schema (daimaxiaxie, Aug 19, 2024)
1437f15  fix kruise daemonset schema (daimaxiaxie, Aug 20, 2024)
e46bd8a  add scheduling suite test for kruise daemonset (daimaxiaxie, Aug 20, 2024)
c7ee472  add should account for overhead with kruise daemonset test case (daimaxiaxie, Aug 20, 2024)
c018f7a  clean kruise object after each (daimaxiaxie, Aug 20, 2024)
57c2620  fix rebase (daimaxiaxie, Aug 30, 2024)
b587a08  update go mod (daimaxiaxie, Aug 30, 2024)
4 changes: 3 additions & 1 deletion go.mod
@@ -65,7 +65,7 @@ require (
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/net v0.28.0 // indirect
@@ -84,6 +84,8 @@ require (
sigs.k8s.io/yaml v1.4.0 // indirect
)

require github.com/openkruise/kruise v1.7.1

retract (
v0.100.101-test // accidentally published testing version
v0.35.3 // accidentally published incomplete patch release
8 changes: 5 additions & 3 deletions go.sum
@@ -8,7 +8,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -89,6 +89,8 @@ github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo
github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/openkruise/kruise v1.7.1 h1:0wWq+p6TZQavo72O+CzCogtN5uQsv5b6m8RLWr9kC48=
github.com/openkruise/kruise v1.7.1/go.mod h1:l0BlhOh5Szp0c0LDJK6wxfPHnyy2kQ3N6ayDT9gHHus=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -110,8 +112,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
38 changes: 33 additions & 5 deletions pkg/controllers/node/termination/suite_test.go
@@ -64,7 +64,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...), test.WithFieldIndexers(test.NodeClaimFieldIndexer(ctx), test.VolumeAttachmentFieldIndexer(ctx)))
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...), test.WithCRDs(test.KruiseCRDs...), test.WithFieldIndexers(test.NodeClaimFieldIndexer(ctx), test.VolumeAttachmentFieldIndexer(ctx)))

cloudProvider = fake.NewCloudProvider()
recorder = test.NewEventRecorder()
@@ -89,7 +89,7 @@ var _ = Describe("Termination", func() {
})

AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
ExpectCleanedUp(ctx, env.Client, test.KruiseObjects...)
fakeClock.SetTime(time.Now())
cloudProvider.Reset()
queue.Reset()
@@ -366,7 +366,10 @@ var _ = Describe("Termination", func() {
daemonEvict := test.DaemonSet()
daemonNodeCritical := test.DaemonSet(test.DaemonSetOptions{PodOptions: test.PodOptions{PriorityClassName: "system-node-critical"}})
daemonClusterCritical := test.DaemonSet(test.DaemonSetOptions{PodOptions: test.PodOptions{PriorityClassName: "system-cluster-critical"}})
ExpectApplied(ctx, env.Client, daemonEvict, daemonNodeCritical, daemonClusterCritical)
kruiseDaemonEvict := test.KruiseDaemonSet()
kruiseDaemonNodeCritical := test.KruiseDaemonSet(test.DaemonSetOptions{PodOptions: test.PodOptions{PriorityClassName: "system-node-critical"}})
kruiseDaemonClusterCritical := test.KruiseDaemonSet(test.DaemonSetOptions{PodOptions: test.PodOptions{PriorityClassName: "system-cluster-critical"}})
ExpectApplied(ctx, env.Client, daemonEvict, daemonNodeCritical, daemonClusterCritical, kruiseDaemonEvict, kruiseDaemonNodeCritical, kruiseDaemonClusterCritical)

podEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
podDaemonEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
@@ -396,12 +399,37 @@ var _ = Describe("Termination", func() {
BlockOwnerDeletion: lo.ToPtr(true),
}}}})

ExpectApplied(ctx, env.Client, node, nodeClaim, podEvict, podNodeCritical, podClusterCritical, podDaemonEvict, podDaemonNodeCritical, podDaemonClusterCritical)
podKruiseDaemonEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "DaemonSet",
Name: kruiseDaemonEvict.Name,
UID: kruiseDaemonEvict.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
}}}})
podKruiseDaemonNodeCritical := test.Pod(test.PodOptions{NodeName: node.Name, PriorityClassName: "system-node-critical", ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "DaemonSet",
Name: kruiseDaemonNodeCritical.Name,
UID: kruiseDaemonNodeCritical.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
}}}})
podKruiseDaemonClusterCritical := test.Pod(test.PodOptions{NodeName: node.Name, PriorityClassName: "system-cluster-critical", ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "DaemonSet",
Name: kruiseDaemonClusterCritical.Name,
UID: kruiseDaemonClusterCritical.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
}}}})

ExpectApplied(ctx, env.Client, node, nodeClaim, podEvict, podNodeCritical, podClusterCritical, podDaemonEvict, podDaemonNodeCritical, podDaemonClusterCritical, podKruiseDaemonEvict, podKruiseDaemonNodeCritical, podKruiseDaemonClusterCritical)

// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())

podGroups := [][]*corev1.Pod{{podEvict}, {podDaemonEvict}, {podNodeCritical, podClusterCritical}, {podDaemonNodeCritical, podDaemonClusterCritical}}
podGroups := [][]*corev1.Pod{{podEvict}, {podDaemonEvict, podKruiseDaemonEvict}, {podNodeCritical, podClusterCritical}, {podDaemonNodeCritical, podDaemonClusterCritical, podKruiseDaemonNodeCritical, podKruiseDaemonClusterCritical}}
for i, podGroup := range podGroups {
node = ExpectNodeExists(ctx, env.Client, node.Name)
for _, p := range podGroup {
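
Note (not part of the diff): the eviction ordering above only works if the termination flow recognizes pods controlled by a Kruise Advanced DaemonSet as daemon pods. The check itself (the IsOwnedByDaemonSet helper named in the commit list) is not visible in these hunks; a minimal sketch of such a check, assuming it simply matches the controller reference's group/version/kind, could look like this:

package pod

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// daemonSetGVKs lists the controllers whose pods are treated as daemon pods:
// the built-in apps/v1 DaemonSet and the Kruise Advanced DaemonSet.
var daemonSetGVKs = []schema.GroupVersionKind{
	{Group: "apps", Version: "v1", Kind: "DaemonSet"},
	{Group: "apps.kruise.io", Version: "v1alpha1", Kind: "DaemonSet"},
}

// IsOwnedByDaemonSet reports whether the pod has a controller reference to
// either DaemonSet flavor.
func IsOwnedByDaemonSet(pod *corev1.Pod) bool {
	for _, ref := range pod.OwnerReferences {
		if ref.Controller == nil || !*ref.Controller {
			continue
		}
		gv, err := schema.ParseGroupVersion(ref.APIVersion)
		if err != nil {
			continue
		}
		for _, gvk := range daemonSetGVKs {
			if gv.Group == gvk.Group && gv.Version == gvk.Version && ref.Kind == gvk.Kind {
				return true
			}
		}
	}
	return false
}

The same pattern presumably extends to IsOwnedByStatefulSet, matching apps/v1 StatefulSets as well as apps.kruise.io/v1beta1 Advanced StatefulSets, which is what the scheduling tests further below rely on.
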
34 changes: 28 additions & 6 deletions pkg/controllers/provisioning/provisioner.go
@@ -23,6 +23,10 @@ import (
"strings"
"time"

kruise "github.com/openkruise/kruise/apis/apps/v1alpha1"

"sigs.k8s.io/karpenter/pkg/operator/options"

"github.com/awslabs/operatorpkg/option"
"github.com/awslabs/operatorpkg/status"

@@ -417,25 +421,43 @@ func (p *Provisioner) getDaemonSetPods(ctx context.Context) ([]*corev1.Pod, erro
return nil, fmt.Errorf("listing daemonsets, %w", err)
}

return lo.Map(daemonSetList.Items, func(d appsv1.DaemonSet, _ int) *corev1.Pod {
pod := p.cluster.GetDaemonSetPod(&d)
handler := func(pod *corev1.Pod, template corev1.PodTemplateSpec) *corev1.Pod {
if pod == nil {
pod = &corev1.Pod{Spec: d.Spec.Template.Spec}
pod = &corev1.Pod{Spec: template.Spec}
}
// Replacing retrieved pod affinity with daemonset pod template required node affinity since this is overridden
// by the daemonset controller during pod creation
// https://github.com/kubernetes/kubernetes/blob/c5cf0ac1889f55ab51749798bec684aed876709d/pkg/controller/daemon/util/daemonset_util.go#L176
if d.Spec.Template.Spec.Affinity != nil && d.Spec.Template.Spec.Affinity.NodeAffinity != nil && d.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if template.Spec.Affinity != nil && template.Spec.Affinity.NodeAffinity != nil && template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if pod.Spec.Affinity == nil {
pod.Spec.Affinity = &corev1.Affinity{}
}
if pod.Spec.Affinity.NodeAffinity == nil {
pod.Spec.Affinity.NodeAffinity = &corev1.NodeAffinity{}
}
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = d.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
return pod
}), nil
}

pods := lo.Map(daemonSetList.Items, func(d appsv1.DaemonSet, _ int) *corev1.Pod {
pod := p.cluster.GetDaemonSetPod(&d)
return handler(pod, d.Spec.Template)
})

if options.FromContext(ctx).SupportKruise {
kruiseDaemonSetList := &kruise.DaemonSetList{}
if err := p.kubeClient.List(ctx, kruiseDaemonSetList); err != nil {
return nil, fmt.Errorf("listing kruise daemonsets, %w", err)
}

pods = append(pods, lo.Map(kruiseDaemonSetList.Items, func(d kruise.DaemonSet, _ int) *corev1.Pod {
pod := p.cluster.GetDaemonSetPod(&d)
return handler(pod, d.Spec.Template)
})...)
}

return pods, nil
}

func (p *Provisioner) Validate(ctx context.Context, pod *corev1.Pod) error {
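
Note (not part of the diff): getDaemonSetPods now consults options.FromContext(ctx).SupportKruise before listing Kruise DaemonSets, but the option itself is defined in pkg/operator/options outside these hunks. A rough, hypothetical sketch of how such a feature gate could be registered, assuming a plain standard-library flag style (only the field name SupportKruise is taken from the diff; the flag name and default below are illustrative):

package options

import "flag"

// Options holds operator-level configuration. Only the field relevant to this
// PR is shown; the real struct carries many more settings.
type Options struct {
	SupportKruise bool
}

// AddFlags registers the Kruise feature gate. Defaulting to false keeps the
// Kruise list calls (and the extra API traffic they imply) disabled unless the
// operator explicitly opts in.
func (o *Options) AddFlags(fs *flag.FlagSet) {
	fs.BoolVar(&o.SupportKruise, "support-kruise", false,
		"Enable scheduling support for OpenKruise Advanced DaemonSets and StatefulSets.")
}

The suite tests below toggle the same switch through test.OptionsFields{SupportKruise: lo.ToPtr(...)}, so both the enabled and disabled code paths get exercised.
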
110 changes: 107 additions & 3 deletions pkg/controllers/provisioning/scheduling/suite_test.go
@@ -87,8 +87,8 @@ func TestScheduling(t *testing.T) {
}

var _ = BeforeSuite(func() {
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...))
ctx = options.ToContext(ctx, test.Options())
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...), test.WithCRDs(test.KruiseCRDs...))
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{SupportKruise: lo.ToPtr(false)}))
cloudProvider = fake.NewCloudProvider()
instanceTypes, _ := cloudProvider.GetInstanceTypes(ctx, nil)
// set these on the cloud provider, so we can manipulate them if needed
@@ -114,7 +114,7 @@ var _ = BeforeEach(func() {
})

var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
ExpectCleanedUp(ctx, env.Client, test.KruiseObjects...)
cluster.Reset()
scheduling.QueueDepth.Reset()
scheduling.SchedulingDurationSeconds.Reset()
@@ -3553,6 +3553,63 @@ var _ = Context("Scheduling", func() {
nodes := ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(1))
})
It("should not re-schedule pods from a deleting node when pods are owned by a Kruise DaemonSet", func() {
ds := test.KruiseDaemonSet()
ExpectApplied(ctx, env.Client, nodePool, ds)

pod := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps.kruise.io/v1alpha1",
Kind: "DaemonSet",
Name: ds.Name,
UID: ds.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
},
},
},
ResourceRequirements: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceMemory: resource.MustParse("100M"),
},
},
},
)
nodeClaim, node := test.NodeClaimAndNode(v1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.NodePoolLabelKey: nodePool.Name,
corev1.LabelInstanceTypeStable: "small-instance-type",
v1.CapacityTypeLabelKey: v1.CapacityTypeOnDemand,
corev1.LabelTopologyZone: "test-zone-1a",
},
},
Status: v1.NodeClaimStatus{
Allocatable: map[corev1.ResourceName]resource.Quantity{corev1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, nodeClaim, node, pod)

ExpectManualBinding(ctx, env.Client, pod, node)

// Mark for deletion so that we consider all pods on this node for reschedulability
cluster.MarkForDeletion(node.Spec.ProviderID)

// Trigger an eviction to set the deletion timestamp but not delete the pod
ExpectEvicted(ctx, env.Client, pod)
ExpectExists(ctx, env.Client, pod)

// Trigger a provisioning loop and expect that we don't create more nodes since we don't consider
// daemonset pods for rescheduling
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov)

// We shouldn't create an additional node here because the pod is owned by a Kruise DaemonSet
nodes := ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(1))
})
It("should not reschedule pods from a deleting node when pods are not active and they are owned by a ReplicaSet", func() {
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, nodePool, rs)
@@ -3639,6 +3696,53 @@ var _ = Context("Scheduling", func() {
nodes := ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(2))

// Expect both nodes to be of the same size to schedule the pod once it gets re-created
for _, n := range nodes {
Expect(n.Labels[corev1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
}
})
It("should reschedule pods from a deleting node when pods are not active and they are owned by a Kruise StatefulSet", func() {
ss := test.KruiseStatefulSet()
ExpectApplied(ctx, env.Client, nodePool, ss)

pod := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps.kruise.io/v1beta1",
Kind: "StatefulSet",
Name: ss.Name,
UID: ss.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
},
},
},
ResourceRequirements: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceMemory: resource.MustParse("100M"),
},
},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[corev1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))

// Mark for deletion so that we consider all pods on this node for reschedulability
cluster.MarkForDeletion(node.Spec.ProviderID)

// Trigger an eviction to set the deletion timestamp but not delete the pod
ExpectEvicted(ctx, env.Client, pod)
ExpectExists(ctx, env.Client, pod)

// Trigger a provisioning loop and expect another node to get created
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov)

nodes := ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(2))

// Expect both nodes to be of the same size to schedule the pod once it gets re-created
for _, n := range nodes {
Expect(n.Labels[corev1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
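
Note (not part of the diff): the suites reference new fixtures (test.KruiseDaemonSet, test.KruiseStatefulSet, test.KruiseCRDs, test.KruiseObjects) whose definitions live outside these hunks. A self-contained sketch of roughly what such fixtures construct, using the published Kruise API types (the names, namespace, and image below are illustrative only):

package main

import (
	"fmt"

	kruiseappsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
	kruiseappsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// kruiseDaemonSet builds a minimal apps.kruise.io/v1alpha1 DaemonSet whose pod
// template mirrors what the built-in test.DaemonSet fixture produces.
func kruiseDaemonSet(name string, podSpec corev1.PodSpec) *kruiseappsv1alpha1.DaemonSet {
	labels := map[string]string{"app": name}
	return &kruiseappsv1alpha1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"},
		Spec: kruiseappsv1alpha1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec:       podSpec,
			},
		},
	}
}

// kruiseStatefulSet builds a minimal apps.kruise.io/v1beta1 StatefulSet; the
// Advanced StatefulSet lives under v1beta1, matching the APIVersion used in
// the owner references above.
func kruiseStatefulSet(name string, podSpec corev1.PodSpec) *kruiseappsv1beta1.StatefulSet {
	labels := map[string]string{"app": name}
	return &kruiseappsv1beta1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"},
		Spec: kruiseappsv1beta1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec:       podSpec,
			},
		},
	}
}

func main() {
	spec := corev1.PodSpec{Containers: []corev1.Container{{Name: "app", Image: "registry.k8s.io/pause:3.9"}}}
	fmt.Println(kruiseDaemonSet("kruise-ds", spec).Name, kruiseStatefulSet("kruise-sts", spec).Name)
}

The v1beta1 group for the Advanced StatefulSet matches the apps.kruise.io/v1beta1 APIVersion in the scheduling test above, while the Advanced DaemonSet stays on apps.kruise.io/v1alpha1.
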
38 changes: 35 additions & 3 deletions pkg/controllers/provisioning/suite_test.go
@@ -71,7 +71,7 @@ func TestAPIs(t *testing.T) {
}

var _ = BeforeSuite(func() {
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...))
env = test.NewEnvironment(test.WithCRDs(apis.CRDs...), test.WithCRDs(v1alpha1.CRDs...), test.WithCRDs(test.KruiseCRDs...))
ctx = options.ToContext(ctx, test.Options())
cloudProvider = fake.NewCloudProvider()
fakeClock = clock.NewFakeClock(time.Now())
@@ -87,7 +87,7 @@ var _ = BeforeSuite(func() {
})

var _ = BeforeEach(func() {
ctx = options.ToContext(ctx, test.Options())
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{SupportKruise: lo.ToPtr(true)}))
cloudProvider.Reset()
})

@@ -96,7 +96,7 @@ var _ = AfterSuite(func() {
})

var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
ExpectCleanedUp(ctx, env.Client, test.KruiseObjects...)
cloudProvider.Reset()
cluster.Reset()
})
@@ -604,6 +604,38 @@ var _ = Describe("Provisioning", func() {
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should account for overhead with kruise daemonset", func() {
ExpectApplied(ctx, env.Client, test.NodePool(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
ExpectApplied(ctx, env.Client, test.NodePool(), test.KruiseDaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2"), corev1.ResourceMemory: resource.MustParse("2Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 4)),
corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 4)),
})
cloudProvider.InstanceTypes = AddInstanceResources(cloudProvider.InstanceTypes, corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", 8)),
corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dGi", 8)),
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)

ExpectResources(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("8"),
corev1.ResourceMemory: resource.MustParse("8Gi"),
}, node.Status.Capacity)
})
It("should account for overhead (with startup taint)", func() {
nodePool := test.NodePool(v1.NodePool{
Spec: v1.NodePoolSpec{
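
A note on the expected sizing in the "should account for overhead with kruise daemonset" case above: the workload pod (1 CPU / 1Gi) plus the built-in DaemonSet (1 CPU / 1Gi) and the Kruise DaemonSet (2 CPU / 2Gi) request 4 CPU / 4Gi in total. Ignoring the Kruise overhead would let the 4 CPU / 4Gi instance type suffice; accounting for it (presumably together with whatever headroom the fake instance types reserve) is what pushes the expectation to the 8 CPU / 8Gi capacity asserted on the scheduled node.
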