controller.go
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"

	"sigs.k8s.io/sig-storage-local-static-provisioner/pkg/common"
	cleanupmetrics "sigs.k8s.io/sig-storage-local-static-provisioner/pkg/metrics/node-cleanup"
)
// CleanupController handles the deletion of PVCs that reference deleted Nodes.
// Once a Node is deleted, if a PVC was associated with that Node, a timer is started
// for resource deletion. The PVC is deleted if the Node doesn't come back within a user-defined amount of time.
type CleanupController struct {
	client kubernetes.Interface
	// pvQueue is a rate-limited delayed queue. PV names are added to this queue
	// when their corresponding Node is deleted. The queue's delay gives the Node
	// time to come back up before resources are cleaned up.
	pvQueue workqueue.RateLimitingInterface

	nodeLister       corelisters.NodeLister
	nodeListerSynced cache.InformerSynced
	pvLister         corelisters.PersistentVolumeLister
	pvListerSynced   cache.InformerSynced
	pvcLister        corelisters.PersistentVolumeClaimLister
	pvcListerSynced  cache.InformerSynced

	eventRecorder record.EventRecorder
	broadcaster   record.EventBroadcaster

	// storageClassNames is the list of StorageClasses that PVs and PVCs
	// must belong to in order to be eligible for cleanup.
	storageClassNames []string
	// pvcDeletionDelay is the amount of time to wait after Node deletion before cleaning up resources.
	pvcDeletionDelay time.Duration
	// stalePVDiscoveryInterval is how often to scan for and delete PVs with affinity to a deleted Node.
	stalePVDiscoveryInterval time.Duration
}
// NewCleanupController creates a CleanupController that handles the
// deletion of stale PVCs.
func NewCleanupController(client kubernetes.Interface, pvInformer coreinformers.PersistentVolumeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, nodeInformer coreinformers.NodeInformer, storageClassNames []string, pvcDeletionDelay time.Duration, stalePVDiscoveryInterval time.Duration) *CleanupController {
	broadcaster := record.NewBroadcaster()
	eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cleanup-controller"})

	controller := &CleanupController{
		client:            client,
		storageClassNames: storageClassNames,
		// Delayed queue with rate limiting.
		pvQueue: workqueue.NewRateLimitingQueueWithConfig(
			workqueue.DefaultControllerRateLimiter(),
			workqueue.RateLimitingQueueConfig{
				Name: "stalePVQueue",
			}),
		nodeLister:               nodeInformer.Lister(),
		nodeListerSynced:         nodeInformer.Informer().HasSynced,
		pvLister:                 pvInformer.Lister(),
		pvListerSynced:           pvInformer.Informer().HasSynced,
		pvcLister:                pvcInformer.Lister(),
		pvcListerSynced:          pvcInformer.Informer().HasSynced,
		eventRecorder:            eventRecorder,
		broadcaster:              broadcaster,
		pvcDeletionDelay:         pvcDeletionDelay,
		stalePVDiscoveryInterval: stalePVDiscoveryInterval,
	}

	// Set up an event handler for when Nodes are deleted.
	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: controller.nodeDeleted,
	})

	return controller
}
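
// A minimal wiring sketch for this constructor (the shared informer factory from
// k8s.io/client-go/informers, the StorageClass name, and the durations below are
// illustrative assumptions, not part of this file):
//
//	factory := informers.NewSharedInformerFactory(client, 0)
//	ctrl := NewCleanupController(
//		client,
//		factory.Core().V1().PersistentVolumes(),
//		factory.Core().V1().PersistentVolumeClaims(),
//		factory.Core().V1().Nodes(),
//		[]string{"local-storage"}, // hypothetical StorageClass name
//		10*time.Minute,            // pvcDeletionDelay
//		time.Minute,               // stalePVDiscoveryInterval
//	)
//	factory.Start(ctx.Done())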
// Run starts worker threads that process items off of the pvQueue, as well
// as syncing the informer caches and continuously running the Deleter.
// It blocks until the context gives a Done signal, at which point it shuts down the workqueue and waits for
// workers to finish processing their current work items.
func (c *CleanupController) Run(ctx context.Context, workers int) error {
	defer utilruntime.HandleCrash()
	defer c.pvQueue.ShutDown()

	klog.Info("Starting to Run CleanupController")

	c.broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: c.client.CoreV1().Events(v1.NamespaceAll)})
	defer c.broadcaster.Shutdown()

	klog.Info("Waiting for informer caches to sync")
	// Wait for the caches to be synced before starting workers.
	if ok := cache.WaitForCacheSync(ctx.Done(), c.nodeListerSynced, c.pvListerSynced, c.pvcListerSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Infof("Starting workers, count: %d", workers)
	// Launch workers to process items from the queue.
	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	}
	klog.Info("Started workers")

	// Look for stale PVs and start timers for resource cleanup.
	c.startCleanupTimersIfNeeded()

	<-ctx.Done()
	klog.Info("Shutting down workers")

	return nil
}
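
// A minimal sketch of driving Run with a cancellable context (the worker count and
// error handling are illustrative assumptions, not part of this file):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := ctrl.Run(ctx, 2); err != nil {
//		klog.Fatalf("error running CleanupController: %v", err)
//	}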
// runWorker is a long-running function that continually calls
// processNextWorkItem in order to read and process a message on the pvQueue.
func (c *CleanupController) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}
// processNextWorkItem reads a single work item off the pvQueue and
// attempts to process it by calling the syncHandler.
func (c *CleanupController) processNextWorkItem(ctx context.Context) bool {
	key, shutdown := c.pvQueue.Get()
	if shutdown {
		return false
	}
	defer c.pvQueue.Done(key)

	pvName, ok := key.(string)
	if !ok {
		// Item is invalid so we can forget it.
		c.pvQueue.Forget(key)
		klog.Errorf("expected string in workqueue but got %+v", key)
		return true
	}

	err := c.syncHandler(ctx, pvName)
	if err != nil {
		// An error occurred, so re-add the item to the queue to work on later
		// (with backoff to avoid hot-looping).
		c.pvQueue.AddRateLimited(key)
		klog.Errorf("error syncing %q: %v, requeuing", pvName, err)
		return true
	}

	c.pvQueue.Forget(key)
	return true
}
// syncHandler processes a PV by deleting the PVC bound to it if its
// associated Node is gone.
func (c *CleanupController) syncHandler(ctx context.Context, pvName string) error {
	pv, err := c.pvLister.Get(pvName)
	if err != nil {
		if errors.IsNotFound(err) {
			// PV was deleted in the meantime, ignore.
			klog.Infof("PV %q in queue no longer exists", pvName)
			return nil
		}
		return err
	}

	nodeName, ok := common.NodeAttachedToLocalPV(pv)
	if !ok {
		// The PV isn't formatted properly, so we will never be able to
		// determine its corresponding Node; ignore it.
		klog.Errorf("error getting node attached to pv: %s", pv)
		return nil
	}

	nodeExists, err := common.NodeExists(c.nodeLister, nodeName)
	if err != nil {
		return err
	}
	// Check that the Node the PV/PVC references is still deleted.
	if nodeExists {
		return nil
	}

	pvClaimRef := pv.Spec.ClaimRef
	if pvClaimRef == nil {
		return nil
	}

	pvc, err := c.pvcLister.PersistentVolumeClaims(pvClaimRef.Namespace).Get(pvClaimRef.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// PVC was deleted in the meantime, ignore.
			klog.Infof("PVC %q in namespace %q no longer exists", pvClaimRef.Name, pvClaimRef.Namespace)
			return nil
		}
		return err
	}
	// Check that the PVC we're about to delete still points back to the PV that enqueued it
	// and that it is the exact same PVC from the PV's claimRef.
	if pvc.Spec.VolumeName != pv.Name || pvc.UID != pvClaimRef.UID {
		klog.Infof("Original bond between PVC %q and PV %q was severed. The original objects don't reference each other", pvc.Name, pv.Name)
		return nil
	}

	err = c.deletePVC(ctx, pvc)
	if err != nil {
		cleanupmetrics.PersistentVolumeClaimDeleteFailedTotal.Inc()
		klog.Errorf("failed to delete pvc %q in namespace %q: %v", pvClaimRef.Name, pvClaimRef.Namespace, err)
		return err
	}

	cleanupmetrics.PersistentVolumeClaimDeleteTotal.Inc()
	klog.Infof("Deleted PVC %q that pointed to Node %q", pvClaimRef.Name, nodeName)
	return nil
}
// nodeDeleted is the informer callback invoked when a Node is deleted; it scans
// for PVs that may now be stale and starts cleanup timers for them.
func (c *CleanupController) nodeDeleted(obj interface{}) {
	c.startCleanupTimersIfNeeded()
}
// startCleanupTimersIfNeeded enqueues any local PVs with a NodeAffinity to a
// deleted Node and a StorageClass listed in storageClassNames.
func (c *CleanupController) startCleanupTimersIfNeeded() {
	pvs, err := c.pvLister.List(labels.Everything())
	if err != nil {
		klog.Errorf("error listing pvs: %v", err)
		return
	}

	for _, pv := range pvs {
		if !common.IsLocalPVWithStorageClass(pv, c.storageClassNames) {
			continue
		}

		nodeName, ok := common.NodeAttachedToLocalPV(pv)
		if !ok {
			klog.Errorf("error getting node attached to pv: %s", pv)
			continue
		}

		shouldEnqueue, err := c.shouldEnqueueEntry(pv, nodeName)
		if err != nil {
			klog.Errorf("error determining whether to enqueue entry with pv %q: %v", pv.Name, err)
			continue
		}

		if shouldEnqueue {
			klog.Infof("Starting timer for resource deletion, resource:%s, timer duration: %s", pv.Spec.ClaimRef, c.pvcDeletionDelay.String())
			c.eventRecorder.Event(pv.Spec.ClaimRef, v1.EventTypeWarning, "ReferencedNodeDeleted", fmt.Sprintf("PVC is tied to a deleted Node. PVC will be cleaned up in %s if the Node doesn't come back", c.pvcDeletionDelay.String()))
			c.pvQueue.AddAfter(pv.Name, c.pvcDeletionDelay)
		}
	}
}
// shouldEnqueueEntry checks if a PV should be enqueued to the pvQueue.
// The PV must be a local PV, have a StorageClass present in the list of storageClassNames, have a NodeAffinity
// to a deleted Node, and have a PVC bound to it (otherwise there's nothing to clean up).
func (c *CleanupController) shouldEnqueueEntry(pv *v1.PersistentVolume, nodeName string) (bool, error) {
	if pv.Spec.ClaimRef == nil {
		return false, nil
	}

	exists, err := common.NodeExists(c.nodeLister, nodeName)
	// Enqueue only when the Node lookup succeeded and the Node no longer exists.
	return !exists && err == nil, err
}
// deletePVC deletes the PVC with the given name and namespace
// and returns nil if the operation was successful or if the PVC doesn't exist.
func (c *CleanupController) deletePVC(ctx context.Context, pvc *v1.PersistentVolumeClaim) error {
	// Use a UID precondition so we only delete the exact PVC object we inspected.
	options := metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{UID: &pvc.UID},
	}
	err := c.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, options)
	if err != nil && errors.IsNotFound(err) {
		// The PVC could already be deleted by some other process.
		klog.Infof("PVC %q in namespace %q no longer exists", pvc.Name, pvc.Namespace)
		return nil
	}
	return err
}