Deprecate name/namespace labels of work
Signed-off-by: whitewindmills <[email protected]>
whitewindmills committed Apr 24, 2024
1 parent 4898c0f commit 7e700d7
Showing 10 changed files with 103 additions and 112 deletions.
6 changes: 0 additions & 6 deletions pkg/apis/work/v1alpha2/well_known_constants.go
@@ -58,12 +58,6 @@ const (
// - Manifest in Work object: describes the name of ClusterResourceBinding which the manifest derived from.
ClusterResourceBindingAnnotationKey = "clusterresourcebinding.karmada.io/name"

// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"

// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"

// BindingManagedByLabel is added to ResourceBinding to represent what kind of resource manages this Binding.
BindingManagedByLabel = "binding.karmada.io/managed-by"
)
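With the per-Work namespace/name labels removed above, objects are associated with their Work through the single permanent-ID label that remains in this file. A minimal, self-contained sketch of selecting objects by that one label, assuming the key work.karmada.io/permanent-id (the value is not shown in this diff), could look like this:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// workPermanentIDLabel stands in for workv1alpha2.WorkPermanentIDLabel; the
// exact key is an assumption here and should come from the Karmada API package.
const workPermanentIDLabel = "work.karmada.io/permanent-id"

// selectorForWork matches every object stamped with a Work's permanent ID,
// replacing the old namespace/name label pair removed above.
func selectorForWork(permanentID string) labels.Selector {
	return labels.SelectorFromSet(labels.Set{workPermanentIDLabel: permanentID})
}

func main() {
	sel := selectorForWork("0a1b2c3d-4e5f-6789-abcd-ef0123456789")
	fmt.Println(sel.String())
}
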
53 changes: 16 additions & 37 deletions pkg/controllers/execution/execution_controller.go
@@ -110,7 +110,7 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques
}

if !util.IsClusterReady(&cluster.Status) {
klog.Errorf("Stop sync work(%s/%s) for cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
klog.Errorf("Stop syncing the work(%s/%s) for the cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)
}

@@ -133,56 +133,37 @@ func (c *Controller) syncWork(clusterName string, work *workv1alpha1.Work) (cont
err := c.syncToClusters(clusterName, work)
metrics.ObserveSyncWorkloadLatency(err, start)
if err != nil {
msg := fmt.Sprintf("Failed to sync work(%s) to cluster(%s): %v", work.Name, clusterName, err)
msg := fmt.Sprintf("Failed to sync work(%s/%s) to cluster(%s), err: %v", work.Namespace, work.Name, clusterName, err)
klog.Errorf(msg)
c.EventRecorder.Event(work, corev1.EventTypeWarning, events.EventReasonSyncWorkloadFailed, msg)
return controllerruntime.Result{}, err
}
msg := fmt.Sprintf("Sync work (%s) to cluster(%s) successful.", work.Name, clusterName)
msg := fmt.Sprintf("Sync work(%s/%s) to cluster(%s) successful.", work.Namespace, work.Name, clusterName)
klog.V(4).Infof(msg)
c.EventRecorder.Event(work, corev1.EventTypeNormal, events.EventReasonSyncWorkloadSucceed, msg)
return controllerruntime.Result{}, nil
}

// tryDeleteWorkload tries to delete resource in the given member cluster.
// tryDeleteWorkload tries to delete resources in the given member cluster.
func (c *Controller) tryDeleteWorkload(clusterName string, work *workv1alpha1.Work) error {
var errs []error
for _, manifest := range work.Spec.Workload.Manifests {
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
klog.Errorf("Failed to unmarshal workload, error is: %v", err)
return err
}

fedKey, err := keys.FederatedKeyFunc(clusterName, workload)
if err != nil {
klog.Errorf("Failed to get FederatedKey %s, error: %v", workload.GetName(), err)
return err
}

clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
return err
}

// Avoid deleting resources that not managed by karmada.
if util.GetLabelValue(clusterObj.GetLabels(), util.ManagedByKarmadaLabel) != util.ManagedByKarmadaLabelValue {
klog.Infof("Abort deleting the resource(kind=%s, %s/%s) exists in cluster %v but not managed by karmada", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), clusterName)
return nil
klog.Errorf("Failed to unmarshal workload of the work(%s/%s), error is: %v", work.GetNamespace(), work.GetName(), err)
errs = append(errs, err)
continue
}
util.MergeLabel(workload, workv1alpha2.WorkPermanentIDLabel, util.GetLabelValue(work.Labels, workv1alpha2.WorkPermanentIDLabel))

err = c.ObjectWatcher.Delete(clusterName, workload)
if err != nil {
klog.Errorf("Failed to delete resource in the given member cluster %v, err is %v", clusterName, err)
return err
errs = append(errs, err)
}
}

return nil
return errors.NewAggregate(errs)
}

// removeFinalizer remove finalizer from the given Work
@@ -207,7 +188,7 @@ func (c *Controller) syncToClusters(clusterName string, work *workv1alpha1.Work)
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
klog.Errorf("Failed to unmarshal workload, error is: %v", err)
klog.Errorf("Failed to unmarshal workload of the work(%s/%s), error is: %v", work.GetNamespace(), work.GetName(), err)
errs = append(errs, err)
continue
}
@@ -253,26 +234,24 @@ func (c *Controller) tryCreateOrUpdateWorkload(clusterName string, workload *uns
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)
klog.Errorf("Failed to get the resource(kind=%s, %s/%s) from member cluster(%s), err is %v ", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
return err
}
err = c.ObjectWatcher.Create(clusterName, workload)
if err != nil {
klog.Errorf("Failed to create resource(%v/%v) in the given member cluster %s, err is %v", workload.GetNamespace(), workload.GetName(), clusterName, err)
return err
}
return nil
}

err = c.ObjectWatcher.Update(clusterName, workload, clusterObj)
if err != nil {
klog.Errorf("Failed to update resource in the given member cluster %s, err is %v", clusterName, err)
return err
}
return nil
}

// updateAppliedCondition update the Applied condition for the given Work
// updateAppliedCondition updates the applied condition for the given Work.
func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status metav1.ConditionStatus, reason, message string) error {
newWorkAppliedCondition := metav1.Condition{
Type: workv1alpha1.WorkApplied,
@@ -292,7 +271,7 @@ func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status meta
if err = c.Get(context.TODO(), client.ObjectKey{Namespace: work.Namespace, Name: work.Name}, updated); err == nil {
work = updated
} else {
klog.Errorf("Failed to get updated work %s/%s: %v", work.Namespace, work.Name, err)
klog.Errorf("Failed to get the updated work(%s/%s), err: %v", work.Namespace, work.Name, err)
}
return updateErr
})
@@ -301,7 +280,7 @@ func (c *Controller) eventf(object *unstructured.Unstructured, eventType, reason
func (c *Controller) eventf(object *unstructured.Unstructured, eventType, reason, messageFmt string, args ...interface{}) {
ref, err := helper.GenEventRef(object)
if err != nil {
klog.Errorf("ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
klog.Errorf("Ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
return
}
c.EventRecorder.Eventf(ref, eventType, reason, messageFmt, args...)
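The reworked tryDeleteWorkload above no longer aborts on the first failing manifest; it records each error and returns an aggregate. A minimal sketch of that pattern using k8s.io/apimachinery's errors package (function and variable names here are illustrative, not taken from the diff):

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// deleteAll applies deleteFn to every manifest, collecting failures so a
// single bad manifest no longer stops the remaining ones from being handled.
func deleteAll(manifests []string, deleteFn func(string) error) error {
	var errs []error
	for _, m := range manifests {
		if err := deleteFn(m); err != nil {
			errs = append(errs, err)
			continue // keep going; the caller gets one aggregated error
		}
	}
	return utilerrors.NewAggregate(errs) // nil when errs is empty
}

func main() {
	err := deleteAll([]string{"deploy-a", "svc-b", "cm-c"}, func(name string) error {
		if name == "svc-b" {
			return fmt.Errorf("failed to delete %q", name)
		}
		return nil
	})
	fmt.Println(err)
}
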
35 changes: 14 additions & 21 deletions pkg/controllers/mcs/endpointslice_controller.go
@@ -34,6 +34,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"

workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/names"
@@ -55,24 +56,17 @@ func (c *EndpointSliceController) Reconcile(ctx context.Context, req controllerr
work := &workv1alpha1.Work{}
if err := c.Client.Get(ctx, req.NamespacedName, work); err != nil {
if apierrors.IsNotFound(err) {
// Cleanup derived EndpointSlices after work has been removed.
err = helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
})
if err == nil {
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, err
}

if !work.DeletionTimestamp.IsZero() {
err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
})
if err != nil {
// Cleanup derived EndpointSlices when deleting the work.
if err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
}); err != nil {
klog.Errorf("Failed to delete endpointslice of the work(%s/%s) when deleting the work, err is %v", work.Namespace, work.Name, err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, c.removeFinalizer(work.DeepCopy())
@@ -83,10 +77,10 @@ func (c *EndpointSliceController) Reconcile(ctx context.Context, req controllerr
// Once the conflict between service_export_controller.go and endpointslice_collect_controller.go is fixed, the following code should be deleted.
if serviceName := util.GetLabelValue(work.Labels, util.ServiceNameLabel); serviceName == "" {
err := helper.DeleteEndpointSlice(c.Client, labels.Set{
workv1alpha1.WorkNamespaceLabel: req.Namespace,
workv1alpha1.WorkNameLabel: req.Name,
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
})
if err != nil {
klog.Errorf("Failed to delete endpointslice of the work(%s/%s) when the serviceexport is deleted, err is %v", work.Namespace, work.Name, err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, c.removeFinalizer(work.DeepCopy())
@@ -146,12 +140,11 @@ func (c *EndpointSliceController) collectEndpointSliceFromWork(work *workv1alpha
}

desiredEndpointSlice := deriveEndpointSlice(endpointSlice, clusterName)
desiredEndpointSlice.Labels = map[string]string{
workv1alpha1.WorkNamespaceLabel: work.Namespace,
workv1alpha1.WorkNameLabel: work.Name,
discoveryv1.LabelServiceName: names.GenerateDerivedServiceName(work.Labels[util.ServiceNameLabel]),
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
}
desiredEndpointSlice.Labels = util.DedupeAndMergeLabels(desiredEndpointSlice.Labels, map[string]string{
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
discoveryv1.LabelServiceName: names.GenerateDerivedServiceName(work.Labels[util.ServiceNameLabel]),
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
})

if err = helper.CreateOrUpdateEndpointSlice(c.Client, desiredEndpointSlice); err != nil {
return err
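collectEndpointSliceFromWork now merges the desired labels into whatever labels the derived EndpointSlice already carries instead of overwriting the whole map. A rough stand-in for util.DedupeAndMergeLabels, with the Karmada label keys assumed rather than taken from this diff, might look like:

package main

import "fmt"

// mergeLabels is a rough stand-in for util.DedupeAndMergeLabels: labels
// already on the derived EndpointSlice are kept, and the desired set wins on
// conflicts, instead of the whole map being replaced as before.
func mergeLabels(existing, desired map[string]string) map[string]string {
	merged := make(map[string]string, len(existing)+len(desired))
	for k, v := range existing {
		merged[k] = v
	}
	for k, v := range desired {
		merged[k] = v
	}
	return merged
}

func main() {
	existing := map[string]string{"kubernetes.io/service-name": "derived-foo"}
	desired := map[string]string{
		"work.karmada.io/permanent-id": "0a1b2c3d", // assumed key
		"karmada.io/managed":           "true",     // assumed key/value
	}
	fmt.Println(mergeLabels(existing, desired))
}
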
24 changes: 13 additions & 11 deletions pkg/controllers/status/work_status_controller.go
@@ -112,7 +112,7 @@ func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerrunt
}

if !util.IsClusterReady(&cluster.Status) {
klog.Errorf("Stop sync work(%s/%s) for cluster(%s) as cluster not ready.", work.Namespace, work.Name, cluster.Name)
klog.Errorf("Stop syncing the Work(%s/%s) to the cluster(%s) as not ready.", work.Namespace, work.Name, cluster.Name)
return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)
}

@@ -168,7 +168,7 @@ func generateKey(obj interface{}) (util.QueueKey, error) {
func getClusterNameFromAnnotation(resource *unstructured.Unstructured) (string, error) {
workNamespace, exist := resource.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation]
if !exist {
klog.V(4).Infof("Ignore resource(%s/%s/%s) which not managed by karmada", resource.GetKind(), resource.GetNamespace(), resource.GetName())
klog.V(4).Infof("Ignore resource(kind=%s, %s/%s) which is not managed by Karmada.", resource.GetKind(), resource.GetNamespace(), resource.GetName())
return "", nil
}

@@ -196,16 +196,17 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}

workNamespace, nsExist := observedObj.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation]
workName, nameExist := observedObj.GetAnnotations()[workv1alpha2.WorkNameAnnotation]
observedAnnotations := observedObj.GetAnnotations()
workNamespace, nsExist := observedAnnotations[workv1alpha2.WorkNamespaceAnnotation]
workName, nameExist := observedAnnotations[workv1alpha2.WorkNameAnnotation]
if !nsExist || !nameExist {
klog.Infof("Ignore object(%s) which not managed by karmada.", fedKey.String())
klog.Infof("Ignore object(%s) which not managed by Karmada.", fedKey.String())
return nil
}

workObject := &workv1alpha1.Work{}
if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: workNamespace, Name: workName}, workObject); err != nil {
// Stop processing if resource no longer exist.
// Stop processing if the resource no longer exists.
if apierrors.IsNotFound(err) {
return nil
}
@@ -214,7 +215,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}

// stop update status if Work object in terminating state.
// stop updating status if the Work object is in terminating state.
if !workObject.DeletionTimestamp.IsZero() {
return nil
}
@@ -230,6 +231,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}

util.MergeLabel(desiredObj, workv1alpha2.WorkPermanentIDLabel, util.GetLabelValue(workObject.Labels, workv1alpha2.WorkPermanentIDLabel))
// we should check if the observed status is consistent with the declaration to prevent accidental changes made
// in member clusters.
needUpdate, err := c.ObjectWatcher.NeedsUpdate(clusterName, desiredObj, observedObj)
@@ -239,7 +241,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {

if needUpdate {
if err := c.ObjectWatcher.Update(clusterName, desiredObj, observedObj); err != nil {
klog.Errorf("Update %s failed: %v", fedKey.String(), err)
klog.Errorf("Updating %s failed: %v", fedKey.String(), err)
return err
}
// We can't return even after a successful update, because that might lose the chance to collect status.
@@ -251,7 +253,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
// status changes.
}

klog.Infof("reflecting %s(%s/%s) status to Work(%s/%s)", observedObj.GetKind(), observedObj.GetNamespace(), observedObj.GetName(), workNamespace, workName)
klog.Infof("Reflecting the resource(kind=%s, %s/%s) status to the Work(%s/%s).", observedObj.GetKind(), observedObj.GetNamespace(), observedObj.GetName(), workNamespace, workName)
return c.reflectStatus(workObject, observedObj)
}

@@ -301,7 +303,7 @@ func (c *WorkStatusController) recreateResourceIfNeeded(work *workv1alpha1.Work,
if reflect.DeepEqual(desiredGVK, workloadKey.GroupVersionKind()) &&
manifest.GetNamespace() == workloadKey.Namespace &&
manifest.GetName() == workloadKey.Name {
klog.Infof("recreating %s", workloadKey.String())
klog.Infof("Recreating resource(%s).", workloadKey.String())
err := c.ObjectWatcher.Create(workloadKey.Cluster, manifest)
if err != nil {
c.eventf(manifest, corev1.EventTypeWarning, events.EventReasonSyncWorkloadFailed, "Failed to create or update resource(%s/%s) in member cluster(%s): %v", manifest.GetNamespace(), manifest.GetName(), workloadKey.Cluster, err)
@@ -550,7 +552,7 @@ func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) e
func (c *WorkStatusController) eventf(object *unstructured.Unstructured, eventType, reason, messageFmt string, args ...interface{}) {
ref, err := helper.GenEventRef(object)
if err != nil {
klog.Errorf("ignore event(%s) as failed to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
klog.Errorf("Ignore event(%s) as failing to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
return
}
c.EventRecorder.Eventf(ref, eventType, reason, messageFmt, args...)
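Before the controller diffs the desired manifest against the observed object, it stamps the Work's permanent ID onto the desired object via util.MergeLabel. A hedged sketch of such a helper on an unstructured object (the label key is an assumption):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// mergeLabel mimics util.MergeLabel: it sets a single label on an
// unstructured object without dropping the labels already present, so the
// desired manifest carries the Work's permanent ID before it is compared
// with the observed object.
func mergeLabel(obj *unstructured.Unstructured, key, value string) {
	existing := obj.GetLabels()
	if existing == nil {
		existing = map[string]string{}
	}
	existing[key] = value
	obj.SetLabels(existing)
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
	}}
	mergeLabel(obj, "work.karmada.io/permanent-id", "0a1b2c3d") // assumed key
	fmt.Println(obj.GetLabels())
}
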
4 changes: 2 additions & 2 deletions pkg/controllers/status/work_status_controller_test.go
@@ -408,7 +408,7 @@ func TestGenerateKey(t *testing.T) {
"metadata": map[string]interface{}{
"name": "test",
"namespace": "default",
"labels": map[string]interface{}{
"annotations": map[string]interface{}{
"test": "karmada-es-cluster",
},
},
@@ -471,7 +471,7 @@ func TestGetClusterNameFromAnnotation(t *testing.T) {
"metadata": map[string]interface{}{
"name": "test",
"namespace": "default",
"labels": map[string]interface{}{
"annotations": map[string]interface{}{
"foo": "karmada-es-cluster",
},
},
6 changes: 2 additions & 4 deletions pkg/karmadactl/promote/promote.go
@@ -41,7 +41,6 @@ import (

configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/karmadactl/get"
@@ -317,8 +316,7 @@ func (o *CommandPromoteOption) revertPromotedDeps(memberClusterFactory cmdutil.F
return
}
// remove the karmada label of dependency resource in member cluster to avoid deleting it when deleting the resource in control plane
u.RemoveLabels(depObj, workv1alpha1.WorkNamespaceLabel)
u.RemoveLabels(depObj, workv1alpha1.WorkNameLabel)
u.RemoveLabels(depObj, workv1alpha2.WorkPermanentIDLabel, u.ManagedByKarmadaLabel)
if len(depObj.GetNamespace()) != 0 {
// update the dependency resource in member cluster
_, err := memberDynamicClient.Resource(depGvr).Namespace(depObj.GetNamespace()).Update(context.Background(), depObj, metav1.UpdateOptions{})
@@ -707,7 +705,7 @@ func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *kar
// preprocessResource delete redundant fields to convert resource as template
func preprocessResource(obj *unstructured.Unstructured) error {
// remove fields that generated by kube-apiserver and no need(or can't) propagate to member clusters.
if err := prune.RemoveIrrelevantField(obj); err != nil {
if err := prune.RemoveIrrelevantFields(obj); err != nil {
return err
}

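revertPromotedDeps now strips the permanent-ID and managed-by labels from the member-cluster copy so the resource survives deletion of the control-plane template. A minimal sketch of that label-stripping step, with the label keys assumed rather than taken from this diff:

package main

import "fmt"

// removeLabels is a minimal stand-in for the u.RemoveLabels helper used
// above: stripping Karmada's ownership labels from the member-cluster copy
// keeps it from being cleaned up when the promoted template is deleted.
func removeLabels(objLabels map[string]string, keys ...string) map[string]string {
	for _, k := range keys {
		delete(objLabels, k)
	}
	return objLabels
}

func main() {
	objLabels := map[string]string{
		"work.karmada.io/permanent-id": "0a1b2c3d", // assumed key
		"karmada.io/managed":           "true",     // assumed key/value
		"app":                          "nginx",
	}
	fmt.Println(removeLabels(objLabels, "work.karmada.io/permanent-id", "karmada.io/managed"))
}
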
4 changes: 2 additions & 2 deletions pkg/resourceinterpreter/default/native/prune/prune.go
@@ -37,9 +37,9 @@ var kindIrrelevantFieldPruners = map[string]irrelevantFieldPruneFunc{
util.ServiceKind: removeServiceIrrelevantField,
}

// RemoveIrrelevantField used to remove fields that generated by kube-apiserver and no need(or can't) propagate to
// RemoveIrrelevantFields used to remove fields that generated by kube-apiserver and no need(or can't) propagate to
// member clusters.
func RemoveIrrelevantField(workload *unstructured.Unstructured, extraHooks ...func(*unstructured.Unstructured)) error {
func RemoveIrrelevantFields(workload *unstructured.Unstructured, extraHooks ...func(*unstructured.Unstructured)) error {
// populated by the kubernetes.
unstructured.RemoveNestedField(workload.Object, "metadata", "creationTimestamp")

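RemoveIrrelevantFields (renamed from RemoveIrrelevantField) drops server-populated fields such as metadata.creationTimestamp before a resource is used as a template. A simplified sketch of the idea; only creationTimestamp appears in this diff, and the other pruned fields are assumptions:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// pruneServerFields sketches the generic part of RemoveIrrelevantFields:
// dropping fields the kube-apiserver populates. creationTimestamp is the one
// shown in this diff; resourceVersion and uid are typical extras and are an
// assumption here.
func pruneServerFields(workload *unstructured.Unstructured) {
	unstructured.RemoveNestedField(workload.Object, "metadata", "creationTimestamp")
	unstructured.RemoveNestedField(workload.Object, "metadata", "resourceVersion")
	unstructured.RemoveNestedField(workload.Object, "metadata", "uid")
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name":              "demo",
			"creationTimestamp": "2024-04-24T00:00:00Z",
			"uid":               "abc-123",
		},
	}}
	pruneServerFields(obj)
	fmt.Println(obj.Object["metadata"])
}
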
2 changes: 1 addition & 1 deletion pkg/resourceinterpreter/default/native/prune/prune_test.go
@@ -185,7 +185,7 @@ func TestRemoveIrrelevantField(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := RemoveIrrelevantField(tt.workload, tt.extraHooks...); err != nil {
if err := RemoveIrrelevantFields(tt.workload, tt.extraHooks...); err != nil {
t.Fatalf("RemoveIrrelevantField() expects no error but got: %v", err)
return
}