From 993afa3549c3c02becbb2078819791d6f0f5445a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 09:24:57 +0000 Subject: [PATCH 1/3] Bump github/codeql-action from 3.26.5 to 3.26.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.5 to 3.26.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/2c779ab0d087cd7fe7b826087247c2c81f27bfa6...294a9d92911152fe08befb9ec03e240add280cb3) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yaml | 2 +- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8e1ac8b198..5d070a9812 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -73,7 +73,7 @@ jobs: output: 'trivy-results.sarif' severity: 'CRITICAL' - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@2c779ab0d087cd7fe7b826087247c2c81f27bfa6 # v3.26.5 + uses: github/codeql-action/upload-sarif@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8 with: sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c0e6788efa..34045470d4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -52,7 +52,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@2c779ab0d087cd7fe7b826087247c2c81f27bfa6 # v3.26.5 + uses: github/codeql-action/init@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@2c779ab0d087cd7fe7b826087247c2c81f27bfa6 # v3.26.5 + uses: github/codeql-action/autobuild@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8 # ℹī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -79,6 +79,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2c779ab0d087cd7fe7b826087247c2c81f27bfa6 # v3.26.5 + uses: github/codeql-action/analyze@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index be6a94fdfb..982beab130 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2c779ab0d087cd7fe7b826087247c2c81f27bfa6 # v2.25.0 + uses: github/codeql-action/upload-sarif@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8 with: sarif_file: results.sarif From 198461e0567fc380c5e4c0b09a2d67b2c4f1dc1d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 09:24:44 +0000 Subject: [PATCH 2/3] Bump ruby/setup-ruby from 1.190.0 to 1.193.0 Bumps [ruby/setup-ruby](https://github.com/ruby/setup-ruby) from 1.190.0 to 1.193.0. - [Release notes](https://github.com/ruby/setup-ruby/releases) - [Changelog](https://github.com/ruby/setup-ruby/blob/master/release.rb) - [Commits](https://github.com/ruby/setup-ruby/compare/a6e6f86333f0a2523ece813039b8b4be04560854...f321cf5a4d1533575411f8752cf25b86478b0442) --- updated-dependencies: - dependency-name: ruby/setup-ruby dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] --- .github/workflows/license.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml index c4f6e68dd9..8c70fb923f 100644 --- a/.github/workflows/license.yml +++ b/.github/workflows/license.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Ruby - uses: ruby/setup-ruby@a6e6f86333f0a2523ece813039b8b4be04560854 # v1.190.0 + uses: ruby/setup-ruby@f321cf5a4d1533575411f8752cf25b86478b0442 # v1.193.0 with: ruby-version: 2.6 - name: Install dependencies From 4f04e93f483526e24d51e4bbc081350d416ef042 Mon Sep 17 00:00:00 2001 From: Ai Ranthem <38308439+AiRanthem@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:21:59 +0800 Subject: [PATCH 3/3] patches volume claim templates into pods before ValidatePodSpec in workloadspread patch validation (#1740) Signed-off-by: AiRanthem Co-authored-by: AiRanthem --- .../validating/workloadspread_validation.go | 21 ++- .../workloadspread_validation_test.go | 175 +++++++++++++++++- test/e2e/apps/workloadspread.go | 108 ----------- 3 files changed, 191 insertions(+), 113 deletions(-) diff --git a/pkg/webhook/workloadspread/validating/workloadspread_validation.go b/pkg/webhook/workloadspread/validating/workloadspread_validation.go index a7e37d3cf5..ac10c648f0 100644 --- a/pkg/webhook/workloadspread/validating/workloadspread_validation.go +++ b/pkg/webhook/workloadspread/validating/workloadspread_validation.go @@ -285,7 +285,6 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap allErrs = append(allErrs, corevalidation.ValidateTolerations(coreTolerations, fldPath.Index(i).Child("tolerations"))...) } - //TODO validate patch if subset.Patch.Raw != nil { // In the case the WorkloadSpread is created before the workload,so no workloadTemplate is obtained, skip the remaining checks. 
if workloadTemplate != nil { @@ -293,7 +292,8 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap var podSpec v1.PodTemplateSpec switch workloadTemplate.GetObjectKind().GroupVersionKind() { case controllerKruiseKindCS: - podSpec = workloadTemplate.(*appsv1alpha1.CloneSet).Spec.Template + cs := workloadTemplate.(*appsv1alpha1.CloneSet) + podSpec = withVolumeClaimTemplates(cs.Spec.Template, cs.Spec.VolumeClaimTemplates) case controllerKindDep: podSpec = workloadTemplate.(*appsv1.Deployment).Spec.Template case controllerKindRS: @@ -301,7 +301,8 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap case controllerKindJob: podSpec = workloadTemplate.(*batchv1.Job).Spec.Template case controllerKindSts: - podSpec = workloadTemplate.(*appsv1.StatefulSet).Spec.Template + sts := workloadTemplate.(*appsv1.StatefulSet) + podSpec = withVolumeClaimTemplates(sts.Spec.Template, sts.Spec.VolumeClaimTemplates) } podBytes, _ := json.Marshal(podSpec) modified, err := strategicpatch.StrategicMergePatch(podBytes, subset.Patch.Raw, &v1.Pod{}) @@ -358,6 +359,20 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap return allErrs } +func withVolumeClaimTemplates(pod v1.PodTemplateSpec, claims []v1.PersistentVolumeClaim) v1.PodTemplateSpec { + for _, pvc := range claims { + pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{ + Name: pvc.Name, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }) + } + return pod +} + func validateWorkloadSpreadConflict(ws *appsv1alpha1.WorkloadSpread, others []appsv1alpha1.WorkloadSpread, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, other := range others { diff --git a/pkg/webhook/workloadspread/validating/workloadspread_validation_test.go b/pkg/webhook/workloadspread/validating/workloadspread_validation_test.go index 4c35603a15..17a461a94e 100644 --- 
a/pkg/webhook/workloadspread/validating/workloadspread_validation_test.go +++ b/pkg/webhook/workloadspread/validating/workloadspread_validation_test.go @@ -19,14 +19,17 @@ import ( "strconv" "testing" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" @@ -943,3 +946,171 @@ func TestValidateWorkloadSpreadConflict(t *testing.T) { }) } } + +func Test_validateWorkloadSpreadSubsets(t *testing.T) { + cloneset := &appsv1alpha1.CloneSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "CloneSet", + APIVersion: "apps.kruise.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cs", + }, + Spec: appsv1alpha1.CloneSetSpec{ + Replicas: ptr.To(int32(6)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "img:latest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "vol-1--0", + MountPath: "/logs", + SubPath: "logs", + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vol-1--0", + }, + }, + }, + }, + } + + sts := &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(6)), + 
Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "img:latest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "vol-1--0", + MountPath: "/logs", + SubPath: "logs", + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vol-1--0", + }, + }, + }, + }, + } + patchData := map[string]any{ + "metadata": map[string]any{ + "annotations": map[string]any{ + "some-key": "some-value", + }, + }, + } + patch, _ := json.Marshal(patchData) + ws := &appsv1alpha1.WorkloadSpread{ + Spec: appsv1alpha1.WorkloadSpreadSpec{ + Subsets: []appsv1alpha1.WorkloadSpreadSubset{ + { + Name: "test", + Patch: runtime.RawExtension{ + Raw: patch, + }, + }, + }, + }, + } + + badCloneSet := cloneset.DeepCopy() + badCloneSet.Spec.VolumeClaimTemplates[0].Name = "bad-boy" + badSts := sts.DeepCopy() + badSts.Spec.VolumeClaimTemplates[0].Name = "bad-boy" + + testCases := []struct { + name string + workload client.Object + testFunc func(errList field.ErrorList) + }{ + { + name: "good cloneset", + workload: cloneset, + testFunc: func(errList field.ErrorList) { + if len(errList) != 0 { + t.Fatalf("expected 0 error, got %d, errList = %+v", len(errList), errList) + } + }, + }, { + name: "bad cloneset", + workload: badCloneSet, + testFunc: func(errList field.ErrorList) { + if len(errList) != 1 { + t.Fatalf("expected 1 error, got %d, errList = %+v", len(errList), errList) + } + }, + }, { + name: "good sts", + workload: sts, + testFunc: func(errList field.ErrorList) { + if len(errList) != 0 { + t.Fatalf("expected 0 error, got %d, errList = %+v", len(errList), errList) + } + }, + }, { + name: "bad sts", + workload: badSts, + testFunc: func(errList field.ErrorList) { + if len(errList) 
!= 1 { + t.Fatalf("expected 1 error, got %d, errList = %+v", len(errList), errList) + } + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.testFunc( + validateWorkloadSpreadSubsets(ws, ws.Spec.Subsets, tc.workload, field.NewPath("spec").Child("subsets")), + ) + }) + } +} diff --git a/test/e2e/apps/workloadspread.go b/test/e2e/apps/workloadspread.go index f9f514f7f2..defea8980b 100644 --- a/test/e2e/apps/workloadspread.go +++ b/test/e2e/apps/workloadspread.go @@ -1929,113 +1929,5 @@ var _ = SIGDescribe("workloadspread", func() { ginkgo.By("elastic deploy for deployment, zone-a=2, zone-b=nil, done") }) - - //ginkgo.It("deploy for job, zone-a=1, zone-b=nil", func() { - // job := tester.NewBaseJob(ns) - // // create workloadSpread - // targetRef := appsv1alpha1.TargetReference{ - // APIVersion: controllerKindJob.GroupVersion().String(), - // Kind: controllerKindJob.Kind, - // Name: job.Name, - // } - // subset1 := appsv1alpha1.WorkloadSpreadSubset{ - // Name: "zone-a", - // RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{ - // MatchExpressions: []corev1.NodeSelectorRequirement{ - // { - // Key: WorkloadSpreadFakeZoneKey, - // Operator: corev1.NodeSelectorOpIn, - // Values: []string{"zone-a"}, - // }, - // }, - // }, - // MaxReplicas: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, - // Patch: runtime.RawExtension{ - // Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-a"}}}`), - // }, - // } - // subset2 := appsv1alpha1.WorkloadSpreadSubset{ - // Name: "zone-b", - // RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{ - // MatchExpressions: []corev1.NodeSelectorRequirement{ - // { - // Key: WorkloadSpreadFakeZoneKey, - // Operator: corev1.NodeSelectorOpIn, - // Values: []string{"zone-b"}, - // }, - // }, - // }, - // Patch: runtime.RawExtension{ - // Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-b"}}}`), - // }, - // } - // workloadSpread := tester.NewWorkloadSpread(ns, workloadSpreadName, &targetRef, 
[]appsv1alpha1.WorkloadSpreadSubset{subset1, subset2}) - // workloadSpread = tester.CreateWorkloadSpread(workloadSpread) - // - // job.Spec.Completions = pointer.Int32Ptr(10) - // job.Spec.Parallelism = pointer.Int32Ptr(2) - // job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever - // job = tester.CreateJob(job) - // tester.WaitJobCompleted(job) - // - // // get pods, and check workloadSpread - // ginkgo.By(fmt.Sprintf("get job(%s/%s) pods, and check workloadSpread(%s/%s) status", job.Namespace, job.Name, workloadSpread.Namespace, workloadSpread.Name)) - // faster, err := util.GetFastLabelSelector(job.Spec.Selector) - // gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // podList, err := tester.C.CoreV1().Pods(job.Namespace).List(metav1.ListOptions{LabelSelector: faster.String()}) - // gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // - // matchedPods := make([]corev1.Pod, 0, len(podList.Items)) - // for i := range podList.Items { - // if podList.Items[i].Status.Phase == corev1.PodSucceeded { - // matchedPods = append(matchedPods, podList.Items[i]) - // } - // } - // - // pods := matchedPods - // gomega.Expect(pods).To(gomega.HaveLen(10)) - // subset1Pods := 0 - // subset2Pods := 0 - // for _, pod := range pods { - // if str, ok := pod.Annotations[workloadspread.MatchedWorkloadSpreadSubsetAnnotations]; ok { - // var injectWorkloadSpread *workloadspread.InjectWorkloadSpread - // err := json.Unmarshal([]byte(str), &injectWorkloadSpread) - // gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // if injectWorkloadSpread.Subset == subset1.Name { - // subset1Pods++ - // gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name)) - // gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset1.RequiredNodeSelectorTerm.MatchExpressions)) - // gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset1.Name)) - // } else if 
injectWorkloadSpread.Subset == subset2.Name { - // subset2Pods++ - // gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name)) - // gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset2.RequiredNodeSelectorTerm.MatchExpressions)) - // gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset2.Name)) - // } - // } else { - // // others PodDeletionCostAnnotation not set - // gomega.Expect(pod.Annotations[workloadspread.PodDeletionCostAnnotation]).To(gomega.Equal("")) - // } - // } - // gomega.Expect(subset1Pods).To(gomega.Equal(5)) - // gomega.Expect(subset2Pods).To(gomega.Equal(5)) - // - // // check workloadSpread status - // ginkgo.By(fmt.Sprintf("check workloadSpread(%s/%s) status", workloadSpread.Namespace, workloadSpread.Name)) - // workloadSpread, err = kc.AppsV1alpha1().WorkloadSpreads(workloadSpread.Namespace).Get(workloadSpread.Name, metav1.GetOptions{}) - // gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // - // gomega.Expect(workloadSpread.Status.SubsetStatuses[0].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[0].Name)) - // gomega.Expect(workloadSpread.Status.SubsetStatuses[0].MissingReplicas).To(gomega.Equal(int32(1))) - // gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].CreatingPods)).To(gomega.Equal(0)) - // gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].DeletingPods)).To(gomega.Equal(0)) - // - // gomega.Expect(workloadSpread.Status.SubsetStatuses[1].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[1].Name)) - // gomega.Expect(workloadSpread.Status.SubsetStatuses[1].MissingReplicas).To(gomega.Equal(int32(-1))) - // gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].CreatingPods)).To(gomega.Equal(0)) - // gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].DeletingPods)).To(gomega.Equal(0)) - // - // ginkgo.By("workloadSpread for job, done") - //}) }) })