diff --git a/go.mod b/go.mod index f99c1f2bd2ff..63939ae25b89 100644 --- a/go.mod +++ b/go.mod @@ -162,3 +162,5 @@ require ( // TODO: https://github.com/knative/serving/issues/14597 replace github.com/gorilla/websocket => github.com/gorilla/websocket v1.5.0 + +replace knative.dev/networking => github.com/ReToCode/networking v0.0.0-20240109073627-bc010699726a diff --git a/go.sum b/go.sum index b473fadc6778..e0780bd03fc8 100644 --- a/go.sum +++ b/go.sum @@ -68,6 +68,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ReToCode/networking v0.0.0-20240109073627-bc010699726a h1:tNqizB6R53cc8/k0GmYQNh3qAvG7JGZXuUpD0Qc8cOQ= +github.com/ReToCode/networking v0.0.0-20240109073627-bc010699726a/go.mod h1:KL3tdfTQ58qOdE66WagSuFtppfMCNp+ZlDL3Oc6Bxno= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210609063737-0067dc6dcea2 h1:t/ces1/q8tuApSb+T5ajsu3wqkofUT43U1gpDYTPYME= @@ -969,8 +971,6 @@ knative.dev/caching v0.0.0-20240108135517-d66e24d0d616 h1:VXAmfO+t6ssnZ1K6/UmKgm knative.dev/caching v0.0.0-20240108135517-d66e24d0d616/go.mod h1:lISrLTfhtKvPKPMCqJJkCE8V+M6l2gNZyb/b73QiR4o= knative.dev/hack v0.0.0-20240108153050-3ea694d6dad7 h1:mICurlRke2mlKP3LmyWYQYl6KZe80rYP5+ag9w2HQLA= knative.dev/hack v0.0.0-20240108153050-3ea694d6dad7/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q= -knative.dev/networking v0.0.0-20240108134621-7cca4b010b25 h1:2H/mvXGSgfBcpNFUMWC3XCWL5zOrGGvas4RBbclEJ1M= -knative.dev/networking v0.0.0-20240108134621-7cca4b010b25/go.mod h1:ynNh7EoqxkST4jWPw6ZVyI9jwuF68WJNrX65yoAQ7FE= knative.dev/pkg v0.0.0-20240108152118-de3e9cc204c9 h1:4nsTvrgApGtLTt6Gpo7ulJS03pBI+wSJ8+EOwVamDx0= knative.dev/pkg v0.0.0-20240108152118-de3e9cc204c9/go.mod h1:YzKN/kzcJPhL+Z4fwuzbaiEnRLIbYvOiZUuCWFJ7PRA= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= diff --git a/pkg/activator/net/helpers.go b/pkg/activator/net/helpers.go index 3a8e5ef49188..f42b1479444a 100644 --- a/pkg/activator/net/helpers.go +++ b/pkg/activator/net/helpers.go @@ -28,7 +28,7 @@ import ( // healthyAddresses takes an endpoints object and a port name and return the set // of addresses that implement this port. -func healthyAddresses(endpoints *corev1.Endpoints, portName string) sets.String { +func healthyAddresses(endpoints *corev1.Endpoints, portName string) sets.Set[string] { var addresses int for _, es := range endpoints.Subsets { for _, port := range es.Ports { @@ -39,7 +39,7 @@ func healthyAddresses(endpoints *corev1.Endpoints, portName string) sets.String } } - ready := make(sets.String, addresses) + ready := make(sets.Set[string], addresses) for _, es := range endpoints.Subsets { for _, port := range es.Ports { @@ -57,7 +57,7 @@ func healthyAddresses(endpoints *corev1.Endpoints, portName string) sets.String // endpointsToDests takes an endpoints object and a port name and returns two sets of // ready and non-ready l4 dests in the endpoints object which have that port. 
-func endpointsToDests(endpoints *corev1.Endpoints, portName string) (ready, notReady sets.String) { +func endpointsToDests(endpoints *corev1.Endpoints, portName string) (ready, notReady sets.Set[string]) { var readyAddresses, nonReadyAddresses int for _, es := range endpoints.Subsets { for _, port := range es.Ports { @@ -69,8 +69,8 @@ func endpointsToDests(endpoints *corev1.Endpoints, portName string) (ready, notR } } - ready = make(sets.String, readyAddresses) - notReady = make(sets.String, nonReadyAddresses) + ready = make(sets.Set[string], readyAddresses) + notReady = make(sets.Set[string], nonReadyAddresses) for _, es := range endpoints.Subsets { for _, port := range es.Ports { diff --git a/pkg/activator/net/helpers_test.go b/pkg/activator/net/helpers_test.go index 62f082eee2db..7e0cb0702f16 100644 --- a/pkg/activator/net/helpers_test.go +++ b/pkg/activator/net/helpers_test.go @@ -31,12 +31,12 @@ func TestEndpointsToDests(t *testing.T) { name string endpoints corev1.Endpoints protocol networking.ProtocolType - expectReady sets.String - expectNotReady sets.String + expectReady sets.Set[string] + expectNotReady sets.Set[string] }{{ name: "no endpoints", endpoints: corev1.Endpoints{}, - expectReady: sets.NewString(), + expectReady: sets.New[string](), }, { name: "single endpoint single address", endpoints: corev1.Endpoints{ @@ -50,7 +50,7 @@ func TestEndpointsToDests(t *testing.T) { }}, }}, }, - expectReady: sets.NewString("128.0.0.1:1234"), + expectReady: sets.New("128.0.0.1:1234"), }, { name: "single endpoint multiple address", endpoints: corev1.Endpoints{ @@ -66,7 +66,7 @@ func TestEndpointsToDests(t *testing.T) { }}, }}, }, - expectReady: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + expectReady: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), }, { name: "single endpoint multiple addresses, including no ready addresses", endpoints: corev1.Endpoints{ @@ -85,8 +85,8 @@ func TestEndpointsToDests(t *testing.T) { }}, }}, }, - expectReady: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), - expectNotReady: sets.NewString("128.0.0.3:1234"), + expectReady: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), + expectNotReady: sets.New("128.0.0.3:1234"), }, { name: "multiple endpoint filter port", endpoints: corev1.Endpoints{ @@ -108,7 +108,7 @@ func TestEndpointsToDests(t *testing.T) { }}, }}, }, - expectReady: sets.NewString("128.0.0.1:1234"), + expectReady: sets.New("128.0.0.1:1234"), }, { name: "multiple endpoint, different protocol", protocol: networking.ProtocolH2C, @@ -135,7 +135,7 @@ func TestEndpointsToDests(t *testing.T) { }}, }}, }, - expectReady: sets.NewString("128.0.0.3:5678", "128.0.0.4:5678"), + expectReady: sets.New("128.0.0.3:5678", "128.0.0.4:5678"), }} { t.Run(tc.name, func(t *testing.T) { if tc.protocol == "" { diff --git a/pkg/activator/net/revision_backends.go b/pkg/activator/net/revision_backends.go index 3dab280fb11f..41bcd9372fad 100644 --- a/pkg/activator/net/revision_backends.go +++ b/pkg/activator/net/revision_backends.go @@ -64,15 +64,15 @@ import ( type revisionDestsUpdate struct { Rev types.NamespacedName ClusterIPDest string - Dests sets.String + Dests sets.Set[string] } type dests struct { - ready sets.String - notReady sets.String + ready sets.Set[string] + notReady sets.Set[string] } -func (d dests) becameNonReady(prev dests) sets.String { +func (d dests) becameNonReady(prev dests) sets.Set[string] { return prev.ready.Intersection(d.notReady) } @@ -102,7 +102,7 @@ type revisionWatcher struct { done chan struct{} // Stores the list of pods that have been 
successfully probed. - healthyPods sets.String + healthyPods sets.Set[string] // Stores whether the service ClusterIP has been seen as healthy. clusterIPHealthy bool @@ -227,7 +227,7 @@ func (rw *revisionWatcher) probeClusterIP(dest string) (bool, error) { // the ones that are successfully probed, whether the update was a no-op, or an error. // If probing fails but not all errors were compatible with being caused by // mesh being enabled, being enabled, notMesh will be true. -func (rw *revisionWatcher) probePodIPs(ready, notReady sets.String) (succeeded sets.String, noop bool, notMesh bool, err error) { +func (rw *revisionWatcher) probePodIPs(ready, notReady sets.Set[string]) (succeeded sets.Set[string], noop bool, notMesh bool, err error) { dests := ready.Union(notReady) // Short circuit case where all the current pods are already known to be healthy. @@ -296,7 +296,7 @@ func (rw *revisionWatcher) probePodIPs(ready, notReady sets.String) (succeeded s return healthy, unchanged, sawNotMesh.Load(), err } -func (rw *revisionWatcher) sendUpdate(clusterIP string, dests sets.String) { +func (rw *revisionWatcher) sendUpdate(clusterIP string, dests sets.Set[string]) { select { case <-rw.stopCh: return diff --git a/pkg/activator/net/revision_backends_test.go b/pkg/activator/net/revision_backends_test.go index aebadccf0666..1f9e59b10ae2 100644 --- a/pkg/activator/net/revision_backends_test.go +++ b/pkg/activator/net/revision_backends_test.go @@ -133,13 +133,13 @@ func TestRevisionWatcher(t *testing.T) { meshMode netcfg.MeshCompatibilityMode }{{ name: "single healthy podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.1:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "128.0.0.1:1234": {{ Code: http.StatusOK, @@ -148,13 +148,13 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "single not ready but healthy podIP", - dests: dests{notReady: sets.NewString("128.0.0.1:1234")}, + dests: dests{notReady: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.1:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "128.0.0.1:1234": {{ Code: http.StatusOK, @@ -163,14 +163,14 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "single http2 podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, protocol: pkgnet.ProtocolH2C, clusterPort: corev1.ServicePort{ Name: "http2", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.1:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "129.0.0.1:1234": {{ Err: errors.New("clusterIP transport error"), @@ -182,7 +182,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "single http2 clusterIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234"), notReady: sets.NewString("128.0.0.2:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234"), notReady: sets.New("128.0.0.2:1234")}, protocol: 
pkgnet.ProtocolH2C, clusterPort: corev1.ServicePort{ Name: "http2", @@ -190,7 +190,7 @@ func TestRevisionWatcher(t *testing.T) { }, clusterIP: "129.0.0.1", noPodAddressability: true, - expectUpdates: []revisionDestsUpdate{{ClusterIPDest: "129.0.0.1:1234", Dests: sets.NewString("128.0.0.1:1234")}}, + expectUpdates: []revisionDestsUpdate{{ClusterIPDest: "129.0.0.1:1234", Dests: sets.New("128.0.0.1:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "129.0.0.1:1234": {{ Code: http.StatusOK, @@ -214,7 +214,7 @@ func TestRevisionWatcher(t *testing.T) { initialClusterIPState: true, }, { name: "single unavailable podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, @@ -230,7 +230,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "single error podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, @@ -246,13 +246,13 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "podIP slow ready", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.1:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "129.0.0.1:1234": {{ Err: errors.New("clusterIP transport error"), @@ -266,13 +266,13 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "multiple healthy podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), notReady: sets.NewString("128.0.0.3:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), notReady: sets.New("128.0.0.3:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.3:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.3:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "128.0.0.1:1234": {{ Code: http.StatusOK, @@ -289,13 +289,13 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "one healthy one unhealthy podIP", - dests: dests{ready: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234", "128.0.0.2:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, }, clusterIP: "129.0.0.1", - expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.2:1234")}}, + expectUpdates: []revisionDestsUpdate{{Dests: sets.New("128.0.0.2:1234")}}, probeHostResponses: map[string][]activatortest.FakeResponse{ "128.0.0.1:1234": {{ Err: errors.New("clusterIP transport error"), @@ -307,15 +307,15 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "one healthy one unhealthy podIP then both healthy", - dests: dests{ready: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234", "128.0.0.2:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 4321, }, clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{ - {Dests: sets.NewString("128.0.0.2:1234")}, - {Dests: sets.NewString("128.0.0.2:1234", "128.0.0.1:1234")}, + {Dests: 
sets.New("128.0.0.2:1234")}, + {Dests: sets.New("128.0.0.2:1234", "128.0.0.1:1234")}, }, probeHostResponses: map[string][]activatortest.FakeResponse{ "128.0.0.1:1234": {{ @@ -334,7 +334,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "clusterIP slow ready, no pod addressability", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1234, @@ -342,7 +342,7 @@ func TestRevisionWatcher(t *testing.T) { clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{{ ClusterIPDest: "129.0.0.1:1234", - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }}, noPodAddressability: true, probeHostResponses: map[string][]activatortest.FakeResponse{ @@ -360,7 +360,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "clusterIP ready, no pod addressability", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -369,7 +369,7 @@ func TestRevisionWatcher(t *testing.T) { clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{{ ClusterIPDest: "129.0.0.1:1235", - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }}, probeHostResponses: map[string][]activatortest.FakeResponse{ "129.0.0.1:1234": {{ @@ -382,7 +382,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "clusterIP ready, pod fails with non-mesh error then succeeds", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -390,7 +390,7 @@ func TestRevisionWatcher(t *testing.T) { noPodAddressability: false, clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{ - {Dests: sets.NewString("128.0.0.1:1234")}, + {Dests: sets.New("128.0.0.1:1234")}, }, probeHostResponses: map[string][]activatortest.FakeResponse{ "129.0.0.1:1234": {{ @@ -407,7 +407,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "passthrough lb, clusterIP ready but no fallback", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -426,7 +426,7 @@ func TestRevisionWatcher(t *testing.T) { usePassthroughLb: true, }, { name: "mesh mode enabled: pod ready but should still use cluster IP", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -434,7 +434,7 @@ func TestRevisionWatcher(t *testing.T) { noPodAddressability: true, clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{ - {ClusterIPDest: "129.0.0.1:1235", Dests: sets.NewString("128.0.0.1:1234")}, + {ClusterIPDest: "129.0.0.1:1235", Dests: sets.New("128.0.0.1:1234")}, }, meshMode: netcfg.MeshCompatibilityModeEnabled, probeHostResponses: map[string][]activatortest.FakeResponse{ @@ -450,7 +450,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "mesh mode disabled: pod initially returns mesh-compatible error, but don't fallback", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -458,7 +458,7 @@ func TestRevisionWatcher(t *testing.T) { noPodAddressability: false, clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{ - {Dests: 
sets.NewString("128.0.0.1:1234")}, + {Dests: sets.New("128.0.0.1:1234")}, }, meshMode: netcfg.MeshCompatibilityModeDisabled, probeHostResponses: map[string][]activatortest.FakeResponse{ @@ -478,7 +478,7 @@ func TestRevisionWatcher(t *testing.T) { }, }, { name: "ready pod in k8s api when mesh-compat disabled", - dests: dests{ready: sets.NewString("128.0.0.1:1234")}, + dests: dests{ready: sets.New("128.0.0.1:1234")}, clusterPort: corev1.ServicePort{ Name: "http", Port: 1235, @@ -486,7 +486,7 @@ func TestRevisionWatcher(t *testing.T) { noPodAddressability: false, clusterIP: "129.0.0.1", expectUpdates: []revisionDestsUpdate{ - {Dests: sets.NewString("128.0.0.1:1234")}, + {Dests: sets.New("128.0.0.1:1234")}, }, meshMode: netcfg.MeshCompatibilityModeDisabled, probeHostResponses: map[string][]activatortest.FakeResponse{ @@ -678,7 +678,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { }, expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: testRevision}: { - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, }, updateCnt: 1, @@ -706,7 +706,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { }, expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: testRevision}: { - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, }, updateCnt: 1, @@ -732,10 +732,10 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { }, expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: "test-revision1"}: { - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, {Namespace: testNamespace, Name: "test-revision2"}: { - Dests: sets.NewString("128.1.0.2:1235"), + Dests: sets.New("128.1.0.2:1235"), }, }, updateCnt: 2, @@ -782,7 +782,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: testRevision}: { ClusterIPDest: "129.0.0.1:1234", - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, }, updateCnt: 1, @@ -827,7 +827,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { }, expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: testRevision}: { - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, }, updateCnt: 1, @@ -885,7 +885,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { }, expectDests: map[types.NamespacedName]revisionDestsUpdate{ {Namespace: testNamespace, Name: testRevision}: { - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, }, updateCnt: 1, @@ -957,8 +957,8 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) { func emptyDests() dests { return dests{ - ready: sets.NewString(), - notReady: sets.NewString(), + ready: sets.New[string](), + notReady: sets.New[string](), } } @@ -1041,8 +1041,8 @@ func TestCheckDestsReadyToNotReady(t *testing.T) { } // Initial state. Both are ready. 
cur := dests{ - ready: sets.NewString("10.10.1.3", "10.10.1.2"), - notReady: sets.NewString("10.10.1.1"), + ready: sets.New("10.10.1.3", "10.10.1.2"), + notReady: sets.New("10.10.1.1"), } rw.checkDests(cur, emptyDests()) select { @@ -1066,8 +1066,8 @@ func TestCheckDestsReadyToNotReady(t *testing.T) { prev := cur cur = dests{ - ready: sets.NewString("10.10.1.2"), - notReady: sets.NewString("10.10.1.1", "10.10.1.3"), + ready: sets.New("10.10.1.2"), + notReady: sets.New("10.10.1.1", "10.10.1.3"), } rw.checkDests(cur, prev) select { @@ -1143,8 +1143,8 @@ func TestCheckDests(t *testing.T) { enableProbeOptimisation: true, } rw.checkDests(dests{ - ready: sets.NewString("10.1.1.5"), - notReady: sets.NewString("10.1.1.6"), + ready: sets.New("10.1.1.5"), + notReady: sets.New("10.1.1.6"), }, emptyDests()) select { case <-uCh: @@ -1155,8 +1155,8 @@ func TestCheckDests(t *testing.T) { close(dCh) rw.checkDests(dests{ - ready: sets.NewString("10.1.1.5"), - notReady: sets.NewString("10.1.1.6"), + ready: sets.New("10.1.1.5"), + notReady: sets.New("10.1.1.6"), }, emptyDests()) select { case <-uCh: @@ -1246,11 +1246,11 @@ func TestCheckDestsSwinging(t *testing.T) { } // First not ready, second good, clusterIP: not ready. - rw.checkDests(dests{ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) want := revisionDestsUpdate{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, ClusterIPDest: "", - Dests: sets.NewString("10.0.0.2:1234"), + Dests: sets.New("10.0.0.2:1234"), } select { @@ -1263,10 +1263,10 @@ func TestCheckDestsSwinging(t *testing.T) { } // Second gone, first becomes ready, clusterIP still not ready. - rw.checkDests(dests{ready: sets.NewString("10.0.0.1:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.1:1234")}, emptyDests()) select { case got := <-uCh: - want.Dests = sets.NewString("10.0.0.1:1234") + want.Dests = sets.New("10.0.0.1:1234") if !cmp.Equal(got, want) { t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) } @@ -1275,7 +1275,7 @@ func TestCheckDestsSwinging(t *testing.T) { } // Second is back, but not healthy yet. - rw.checkDests(dests{ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) select { case got := <-uCh: // No update should be sent out, since there's only healthy pod, same as above. @@ -1284,10 +1284,10 @@ func TestCheckDestsSwinging(t *testing.T) { } // All pods are happy now. - rw.checkDests(dests{ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) select { case got := <-uCh: - want.Dests = sets.NewString("10.0.0.2:1234", "10.0.0.1:1234") + want.Dests = sets.New("10.0.0.2:1234", "10.0.0.1:1234") if !cmp.Equal(got, want) { t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) } @@ -1296,7 +1296,7 @@ func TestCheckDestsSwinging(t *testing.T) { } // Make sure we do not send out redundant updates. - rw.checkDests(dests{ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234")}, emptyDests()) select { case got := <-uCh: t.Errorf("Expected no update, but got %#v", got) @@ -1306,8 +1306,8 @@ func TestCheckDestsSwinging(t *testing.T) { // Add a notReady pod, but it's not ready. 
No update should be sent. rw.checkDests(dests{ - ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234"), - notReady: sets.NewString("10.0.0.4:1234"), + ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234"), + notReady: sets.New("10.0.0.4:1234"), }, emptyDests()) select { case got := <-uCh: @@ -1318,12 +1318,12 @@ func TestCheckDestsSwinging(t *testing.T) { // The notReady pod is now ready! rw.checkDests(dests{ - ready: sets.NewString("10.0.0.1:1234", "10.0.0.2:1234"), - notReady: sets.NewString("10.0.0.4:1234"), + ready: sets.New("10.0.0.1:1234", "10.0.0.2:1234"), + notReady: sets.New("10.0.0.4:1234"), }, emptyDests()) select { case got := <-uCh: - want.Dests = sets.NewString("10.0.0.1:1234", "10.0.0.2:1234", "10.0.0.4:1234") + want.Dests = sets.New("10.0.0.1:1234", "10.0.0.2:1234", "10.0.0.4:1234") if !cmp.Equal(got, want) { t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) } @@ -1332,10 +1332,10 @@ func TestCheckDestsSwinging(t *testing.T) { } // Swing to a different pods. - rw.checkDests(dests{ready: sets.NewString("10.0.0.3:1234", "10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.3:1234", "10.0.0.2:1234")}, emptyDests()) select { case got := <-uCh: - want.Dests = sets.NewString("10.0.0.2:1234", "10.0.0.3:1234") + want.Dests = sets.New("10.0.0.2:1234", "10.0.0.3:1234") if !cmp.Equal(got, want) { t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) } @@ -1344,10 +1344,10 @@ func TestCheckDestsSwinging(t *testing.T) { } // Scale down by 1. - rw.checkDests(dests{ready: sets.NewString("10.0.0.2:1234")}, emptyDests()) + rw.checkDests(dests{ready: sets.New("10.0.0.2:1234")}, emptyDests()) select { case got := <-uCh: - want.Dests = sets.NewString("10.0.0.2:1234") + want.Dests = sets.New("10.0.0.2:1234") if !cmp.Equal(got, want) { t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) } @@ -1524,14 +1524,14 @@ func TestServiceMoreThanOne(t *testing.T) { func TestProbePodIPs(t *testing.T) { type input struct { current dests - healthy sets.String + healthy sets.Set[string] meshMode netcfg.MeshCompatibilityMode enableProbeOptimization bool hostResponses map[string][]activatortest.FakeResponse } type expected struct { - healthy sets.String + healthy sets.Set[string] noop bool notMesh bool success bool @@ -1549,13 +1549,13 @@ func TestProbePodIPs(t *testing.T) { name: "all healthy", // Test skipping probes when all endpoints are healthy input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.2"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.2"), }, - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), noop: true, notMesh: false, success: true, @@ -1566,7 +1566,7 @@ func TestProbePodIPs(t *testing.T) { name: "one pod fails probe", // Test that we probe all pods when one fails input: input{ current: dests{ - notReady: sets.NewString("10.10.1.1", "10.10.1.2", "10.10.1.3"), + notReady: sets.New("10.10.1.1", "10.10.1.2", "10.10.1.3"), }, hostResponses: map[string][]activatortest.FakeResponse{ "10.10.1.1": {{ @@ -1581,7 +1581,7 @@ func TestProbePodIPs(t *testing.T) { enableProbeOptimization: true, }, expected: expected{ - healthy: sets.NewString("10.10.1.2", "10.10.1.3"), + healthy: sets.New("10.10.1.2", "10.10.1.3"), noop: false, notMesh: true, success: false, @@ -1592,14 
+1592,14 @@ func TestProbePodIPs(t *testing.T) { name: "ready pods skipped with mesh disabled", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.2"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.2"), }, enableProbeOptimization: true, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), noop: false, notMesh: true, success: true, @@ -1610,14 +1610,14 @@ func TestProbePodIPs(t *testing.T) { name: "ready pods not skipped with mesh auto", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.2"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.2"), }, enableProbeOptimization: true, meshMode: netcfg.MeshCompatibilityModeAuto, }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), noop: false, notMesh: true, success: true, @@ -1628,14 +1628,14 @@ func TestProbePodIPs(t *testing.T) { name: "only ready pods healthy without probe optimization", // NOTE: prior test is effectively this one with probe optimization input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.2"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.2"), }, enableProbeOptimization: false, meshMode: netcfg.MeshCompatibilityModeAuto, }, expected: expected{ - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), noop: false, notMesh: true, success: true, @@ -1646,15 +1646,15 @@ func TestProbePodIPs(t *testing.T) { name: "removes non-existent pods from healthy", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.2"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.2"), }, - healthy: sets.NewString("10.10.1.1", "10.10.1.2", "10.10.1.3"), + healthy: sets.New("10.10.1.1", "10.10.1.2", "10.10.1.3"), enableProbeOptimization: true, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), noop: false, notMesh: false, success: true, @@ -1665,15 +1665,15 @@ func TestProbePodIPs(t *testing.T) { name: "non-probe additions count as changes", // Testing case where ready pods are added but probes do not add more input: input{ current: dests{ - ready: sets.NewString("10.10.1.1", "10.10.1.2"), - notReady: sets.NewString("10.10.1.3"), + ready: sets.New("10.10.1.1", "10.10.1.2"), + notReady: sets.New("10.10.1.3"), }, - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), enableProbeOptimization: false, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), noop: false, notMesh: true, success: true, @@ -1684,15 +1684,15 @@ func TestProbePodIPs(t *testing.T) { name: "non-probe removals count as changes", // Testing case where non-existent pods are removed with no probe changes input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.3"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.3"), }, - healthy: sets.NewString("10.10.1.1", "10.10.1.2"), + healthy: sets.New("10.10.1.1", "10.10.1.2"), enableProbeOptimization: false, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: 
expected{ - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), noop: false, notMesh: true, success: true, @@ -1703,15 +1703,15 @@ func TestProbePodIPs(t *testing.T) { name: "no changes with probes", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.3"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.3"), }, - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), enableProbeOptimization: false, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: expected{ - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), noop: true, notMesh: true, success: true, @@ -1722,14 +1722,14 @@ func TestProbePodIPs(t *testing.T) { name: "no changes without probes", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1", "10.10.1.3"), + ready: sets.New("10.10.1.1", "10.10.1.3"), }, - healthy: sets.NewString("10.10.1.1", "10.10.1.3"), + healthy: sets.New("10.10.1.1", "10.10.1.3"), enableProbeOptimization: false, meshMode: netcfg.MeshCompatibilityModeDisabled, }, expected: expected{ - healthy: sets.NewString("10.10.1.1", "10.10.1.3"), + healthy: sets.New("10.10.1.1", "10.10.1.3"), noop: true, notMesh: false, success: true, @@ -1740,10 +1740,10 @@ func TestProbePodIPs(t *testing.T) { name: "mesh probe error", input: input{ current: dests{ - ready: sets.NewString("10.10.1.1"), - notReady: sets.NewString("10.10.1.3"), + ready: sets.New("10.10.1.1"), + notReady: sets.New("10.10.1.3"), }, - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), enableProbeOptimization: true, meshMode: netcfg.MeshCompatibilityModeAuto, hostResponses: map[string][]activatortest.FakeResponse{ @@ -1753,7 +1753,7 @@ func TestProbePodIPs(t *testing.T) { }, }, expected: expected{ - healthy: sets.NewString("10.10.1.1"), + healthy: sets.New("10.10.1.1"), noop: true, notMesh: false, success: false, diff --git a/pkg/activator/net/throttler.go b/pkg/activator/net/throttler.go index 91573168106c..df6793f3e02a 100644 --- a/pkg/activator/net/throttler.go +++ b/pkg/activator/net/throttler.go @@ -24,6 +24,7 @@ import ( "go.uber.org/atomic" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -626,7 +627,7 @@ func (rt *revisionThrottler) handlePubEpsUpdate(eps *corev1.Endpoints, selfIP st } // We are using List to have the IP addresses sorted for consistent results. - epsL := epSet.List() + epsL := sets.List(epSet) newNA, newAI := int32(len(epsL)), int32(inferIndex(epsL, selfIP)) if newAI == -1 { // No need to do anything, this activator is not in path. diff --git a/pkg/activator/net/throttler_test.go b/pkg/activator/net/throttler_test.go index 16df2f2c4630..ded1a9363d56 100644 --- a/pkg/activator/net/throttler_test.go +++ b/pkg/activator/net/throttler_test.go @@ -310,7 +310,7 @@ func TestThrottlerErrorNoRevision(t *testing.T) { throttler := newTestThrottler(ctx) throttler.handleUpdate(revisionDestsUpdate{ Rev: revID, - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }) // Make sure it now works. @@ -361,7 +361,7 @@ func TestThrottlerErrorOneTimesOut(t *testing.T) { throttler.handleUpdate(revisionDestsUpdate{ Rev: revID, ClusterIPDest: "129.0.0.1:1234", - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }) // Send 2 requests, one should time out. 
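Note on pkg/activator/net/throttler.go above: most of this PR is a mechanical rename from `sets.String` to the generic `sets.Set[string]`, but `epSet.List()` → `sets.List(epSet)` is an API shape change — sorted listing moved from a method to a package-level function (`List` requires `cmp.Ordered`, and a Go method cannot add constraints beyond the receiver's `comparable`). A minimal sketch of the generic `k8s.io/apimachinery/pkg/util/sets` API this diff targets; the addresses are illustrative placeholders, not values from the patch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Before: s := sets.NewString("b", "a"); s.List()
	// After (as in this diff): the type parameter is inferred from the elements.
	eps := sets.New("10.0.0.2:8012", "10.0.0.1:8012") // sets.Set[string]
	empty := sets.New[string]()                       // explicit type parameter when empty

	eps.Insert("10.0.0.3:8012")

	// List is now a package-level function rather than a method; it still
	// returns a sorted slice, which is what keeps results deterministic,
	// e.g. for the activator index inference in handlePubEpsUpdate above.
	fmt.Println(sets.List(eps))                        // [10.0.0.1:8012 10.0.0.2:8012 10.0.0.3:8012]
	fmt.Println(eps.Has("10.0.0.1:8012"), empty.Len()) // true 0
}
```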
@@ -402,88 +402,88 @@ func TestThrottlerSuccesses(t *testing.T) { revision *v1.Revision initUpdates []revisionDestsUpdate requests int - wantDests sets.String + wantDests sets.Set[string] }{{ name: "single healthy podIP", revision: revisionCC1(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1), initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }, { Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }}, requests: 1, - wantDests: sets.NewString("128.0.0.1:1234"), + wantDests: sets.New("128.0.0.1:1234"), }, { name: "single healthy podIP, infinite cc", revision: revision(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1, 0), // Double updates exercise additional paths. initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.2:1234", "128.0.0.32:1212"), + Dests: sets.New("128.0.0.2:1234", "128.0.0.32:1212"), }, { Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }}, requests: 1, - wantDests: sets.NewString("128.0.0.1:1234"), + wantDests: sets.New("128.0.0.1:1234"), }, { name: "single healthy clusterIP", revision: revisionCC1(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1), initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), }, { Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, ClusterIPDest: "129.0.0.1:1234", - Dests: sets.NewString("128.0.0.1:1234"), + Dests: sets.New("128.0.0.1:1234"), }}, requests: 1, - wantDests: sets.NewString("129.0.0.1:1234"), + wantDests: sets.New("129.0.0.1:1234"), }, { name: "spread podIP load", revision: revisionCC1(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1), initUpdates: []revisionDestsUpdate{{ // Double update here exercises some additional paths. 
Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.3:1234"), + Dests: sets.New("128.0.0.3:1234"), }, { Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), }}, requests: 2, - wantDests: sets.NewString("128.0.0.2:1234", "128.0.0.1:1234"), + wantDests: sets.New("128.0.0.2:1234", "128.0.0.1:1234"), }, { name: "clumping test", revision: revision(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1, 3), initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.2:4236", "128.0.0.2:1233", "128.0.0.2:1230"), + Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.2:4236", "128.0.0.2:1233", "128.0.0.2:1230"), }}, requests: 3, - wantDests: sets.NewString("128.0.0.1:1234"), + wantDests: sets.New("128.0.0.1:1234"), }, { name: "roundrobin test", revision: revision(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1, 5 /*cc >3*/), initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, - Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "211.212.213.214"), + Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234", "211.212.213.214"), }}, requests: 3, // All three IP addresses should be used if cc>3. - wantDests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "211.212.213.214"), + wantDests: sets.New("128.0.0.1:1234", "128.0.0.2:1234", "211.212.213.214"), }, { name: "multiple ClusterIP requests", revision: revisionCC1(types.NamespacedName{Namespace: testNamespace, Name: testRevision}, pkgnet.ProtocolHTTP1), initUpdates: []revisionDestsUpdate{{ Rev: types.NamespacedName{Namespace: testNamespace, Name: testRevision}, ClusterIPDest: "129.0.0.1:1234", - Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + Dests: sets.New("128.0.0.1:1234", "128.0.0.2:1234"), }}, requests: 2, - wantDests: sets.NewString("129.0.0.1:1234"), + wantDests: sets.New("129.0.0.1:1234"), }} { t.Run(tc.name, func(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) @@ -582,13 +582,13 @@ func TestThrottlerSuccesses(t *testing.T) { return nil }) - gotDests := sets.NewString() + gotDests := sets.New[string]() for i := 0; i < tc.requests; i++ { result := <-resultChan gotDests.Insert(result.dest) } - if got, want := gotDests.List(), tc.wantDests.List(); !cmp.Equal(want, got) { + if got, want := sets.List(gotDests), sets.List(tc.wantDests); !cmp.Equal(want, got) { t.Errorf("Dests = %v, want: %v, diff: %s", got, want, cmp.Diff(want, got)) rt.mux.RLock() defer rt.mux.RUnlock() @@ -599,8 +599,8 @@ func TestThrottlerSuccesses(t *testing.T) { } } -func trackerDestSet(ts []*podTracker) sets.String { - ret := sets.NewString() +func trackerDestSet(ts []*podTracker) sets.Set[string] { + ret := sets.New[string]() for _, t := range ts { ret.Insert(t.dest) } @@ -625,7 +625,7 @@ func TestPodAssignmentFinite(t *testing.T) { update := revisionDestsUpdate{ Rev: revName, ClusterIPDest: "", - Dests: sets.NewString("ip4", "ip3", "ip5", "ip2", "ip1", "ip0"), + Dests: sets.New("ip4", "ip3", "ip5", "ip2", "ip1", "ip0"), } // This should synchronously update throughout the system. // And now we can inspect `rt`. 
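Aside, for readers tracking the activator changes: the set algebra the revision watcher relies on (`becameNonReady` via `Intersection`, `probePodIPs` via `Union` in pkg/activator/net/revision_backends.go above) is untouched by the migration; only the element type becomes generic. A self-contained sketch of that pattern — the struct shape and method follow the diff, the IPs are illustrative values borrowed from TestCheckDestsReadyToNotReady:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// dests mirrors the post-migration struct in revision_backends.go.
type dests struct {
	ready    sets.Set[string]
	notReady sets.Set[string]
}

// becameNonReady reports pods that were ready in prev but are not ready
// now; the Intersection call is identical to the pre-generics code.
func (d dests) becameNonReady(prev dests) sets.Set[string] {
	return prev.ready.Intersection(d.notReady)
}

func main() {
	prev := dests{ready: sets.New("10.10.1.2", "10.10.1.3"), notReady: sets.New("10.10.1.1")}
	cur := dests{ready: sets.New("10.10.1.2"), notReady: sets.New("10.10.1.1", "10.10.1.3")}

	fmt.Println(sets.List(cur.becameNonReady(prev)))      // [10.10.1.3]
	fmt.Println(sets.List(cur.ready.Union(cur.notReady))) // [10.10.1.1 10.10.1.2 10.10.1.3]
}
```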
@@ -634,7 +634,7 @@ func TestPodAssignmentFinite(t *testing.T) { t.Errorf("NumTrackers = %d, want: %d", got, want) } // 6 = 4 * 1 + 2; index 0 and index 1 have 2 pods and others have 1 pod. - if got, want := trackerDestSet(rt.assignedTrackers), sets.NewString("ip0", "ip4"); !got.Equal(want) { + if got, want := trackerDestSet(rt.assignedTrackers), sets.New("ip0", "ip4"); !got.Equal(want) { t.Errorf("Assigned trackers = %v, want: %v, diff: %s", got, want, cmp.Diff(want, got)) } if got, want := rt.breaker.Capacity(), 2*42; got != want { @@ -675,7 +675,7 @@ func TestPodAssignmentInfinite(t *testing.T) { update := revisionDestsUpdate{ Rev: revName, ClusterIPDest: "", - Dests: sets.NewString("ip3", "ip2", "ip1"), + Dests: sets.New("ip3", "ip2", "ip1"), } // This should synchronously update throughout the system. // And now we can inspect `rt`. @@ -740,7 +740,7 @@ func TestActivatorsIndexUpdate(t *testing.T) { waitInformers() }() - possibleDests := sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.23:1234") + possibleDests := sets.New("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.23:1234") updateCh <- revisionDestsUpdate{ Rev: revID, Dests: possibleDests, @@ -836,7 +836,7 @@ func TestMultipleActivators(t *testing.T) { }() revID := types.NamespacedName{Namespace: testNamespace, Name: testRevision} - possibleDests := sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.23:1234") + possibleDests := sets.New("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.23:1234") updateCh <- revisionDestsUpdate{ Rev: revID, Dests: possibleDests, diff --git a/pkg/activator/stat_reporter_test.go b/pkg/activator/stat_reporter_test.go index 7bbb5d5b8255..02fb68c11772 100644 --- a/pkg/activator/stat_reporter_test.go +++ b/pkg/activator/stat_reporter_test.go @@ -81,9 +81,9 @@ func TestReportStats(t *testing.T) { statNames = append(statNames, m.ToStatMessage().Key.Name) } } - want := sets.NewString("first-a", "first-b", "second-a", "second-b") - if got := sets.NewString(statNames...); !got.Equal(want) { - t.Error("Expected to receive all stats (-want, +got):", cmp.Diff(want.List(), got.List())) + want := sets.New("first-a", "first-b", "second-a", "second-b") + if got := sets.New(statNames...); !got.Equal(want) { + t.Error("Expected to receive all stats (-want, +got):", cmp.Diff(sets.List(want), sets.List(got))) } case <-time.After(2 * time.Second): t.Fatal("Did not receive results after 2 seconds") diff --git a/pkg/apis/serving/k8s_validation.go b/pkg/apis/serving/k8s_validation.go index 797dfb38a508..071002fbdedd 100644 --- a/pkg/apis/serving/k8s_validation.go +++ b/pkg/apis/serving/k8s_validation.go @@ -40,7 +40,7 @@ const ( ) var ( - reservedPaths = sets.NewString( + reservedPaths = sets.New( "/", "/dev", "/dev/log", // Should be a domain socket @@ -49,11 +49,11 @@ var ( "/var/log", ) - reservedContainerNames = sets.NewString( + reservedContainerNames = sets.New( "queue-proxy", ) - reservedEnvVars = sets.NewString( + reservedEnvVars = sets.New( "PORT", "K_SERVICE", "K_CONFIGURATION", @@ -68,13 +68,13 @@ var ( networking.UserQueueMetricsPort, profiling.ProfilingPort) - reservedSidecarEnvVars = reservedEnvVars.Difference(sets.NewString("PORT")) + reservedSidecarEnvVars = reservedEnvVars.Difference(sets.New("PORT")) // The port is named "user-port" on the deployment, but a user cannot set an arbitrary name on the port // in Configuration. The name field is reserved for content-negotiation. Currently 'h2c' and 'http1' are // allowed. 
// https://github.com/knative/serving/blob/main/docs/runtime-contract.md#inbound-network-connectivity - validPortNames = sets.NewString( + validPortNames = sets.New( "h2c", "http1", "", @@ -82,7 +82,7 @@ var ( ) // ValidateVolumes validates the Volumes of a PodSpec. -func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes sets.String) (map[string]corev1.Volume, *apis.FieldError) { +func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes sets.Set[string]) (map[string]corev1.Volume, *apis.FieldError) { volumes := make(map[string]corev1.Volume, len(vs)) var errs *apis.FieldError for i, volume := range vs { @@ -297,7 +297,7 @@ func validateEnvValueFrom(ctx context.Context, source *corev1.EnvVarSource) *api return apis.CheckDisallowedFields(*source, *EnvVarSourceMask(source, features.PodSpecFieldRef != config.Disabled)) } -func getReservedEnvVarsPerContainerType(ctx context.Context) sets.String { +func getReservedEnvVarsPerContainerType(ctx context.Context) sets.Set[string] { if IsInSidecarContainer(ctx) || IsInitContainer(ctx) { return reservedSidecarEnvVars } @@ -412,7 +412,7 @@ func validateInitContainers(ctx context.Context, containers, otherContainers []c return errs.Also(&apis.FieldError{Message: fmt.Sprintf("pod spec support for init-containers is off, "+ "but found %d init containers", len(containers))}) } - allNames := make(sets.String, len(otherContainers)+len(containers)) + allNames := make(sets.Set[string], len(otherContainers)+len(containers)) for _, ctr := range otherContainers { allNames.Insert(ctr.Name) } @@ -434,7 +434,7 @@ func validateContainers(ctx context.Context, containers []corev1.Container, volu return errs.Also(&apis.FieldError{Message: fmt.Sprintf("multi-container is off, "+ "but found %d containers", len(containers))}) } - allNames := make(sets.String, len(containers)) + allNames := make(sets.Set[string], len(containers)) for i := range containers { if allNames.Has(containers[i].Name) { errs = errs.Also(&apis.FieldError{ @@ -457,8 +457,8 @@ func validateContainers(ctx context.Context, containers []corev1.Container, volu } // AllMountedVolumes returns all the mounted volumes in all the containers. -func AllMountedVolumes(containers []corev1.Container) sets.String { - volumeNames := sets.NewString() +func AllMountedVolumes(containers []corev1.Container) sets.Set[string] { + volumeNames := sets.New[string]() for _, c := range containers { for _, vm := range c.VolumeMounts { volumeNames.Insert(vm.Name) @@ -649,8 +649,8 @@ func validateVolumeMounts(mounts []corev1.VolumeMount, volumes map[string]corev1 var errs *apis.FieldError // Check that volume mounts match names in "volumes", that "volumes" has 100% // coverage, and the field restrictions. - seenName := make(sets.String, len(mounts)) - seenMountPath := make(sets.String, len(mounts)) + seenName := make(sets.Set[string], len(mounts)) + seenMountPath := make(sets.Set[string], len(mounts)) for i := range mounts { vm := mounts[i] errs = errs.Also(apis.CheckDisallowedFields(vm, *VolumeMountMask(&vm)).ViaIndex(i)) diff --git a/pkg/apis/serving/v1/revision_defaults.go b/pkg/apis/serving/v1/revision_defaults.go index 6919a25f0587..1599952c6d0d 100644 --- a/pkg/apis/serving/v1/revision_defaults.go +++ b/pkg/apis/serving/v1/revision_defaults.go @@ -75,7 +75,7 @@ func (rs *RevisionSpec) SetDefaults(ctx context.Context) { } // Avoid clashes with user-supplied names when generating defaults. 
- containerNames := make(sets.String, len(rs.PodSpec.Containers)+len(rs.PodSpec.InitContainers)) + containerNames := make(sets.Set[string], len(rs.PodSpec.Containers)+len(rs.PodSpec.InitContainers)) for idx := range rs.PodSpec.Containers { containerNames.Insert(rs.PodSpec.Containers[idx].Name) } @@ -139,7 +139,7 @@ func (rs *RevisionSpec) applyDefault(ctx context.Context, container *corev1.Cont rs.PodSpec.EnableServiceLinks = cfg.Defaults.EnableServiceLinks } - vNames := make(sets.String) + vNames := make(sets.Set[string]) for _, v := range rs.PodSpec.Volumes { if v.EmptyDir != nil || v.PersistentVolumeClaim != nil { vNames.Insert(v.Name) @@ -238,7 +238,7 @@ func (rs *RevisionSpec) defaultSecurityContext(psc *corev1.PodSecurityContext, c } } -func applyDefaultContainerNames(containers []corev1.Container, containerNames sets.String, defaultContainerName string) { +func applyDefaultContainerNames(containers []corev1.Container, containerNames sets.Set[string], defaultContainerName string) { // Default container name based on ContainerNameFromTemplate value from configmap. // In multi-container or init-container mode, add a numeric suffix, avoiding clashes with user-supplied names. nextSuffix := 0 diff --git a/pkg/autoscaler/bucket/bucket.go b/pkg/autoscaler/bucket/bucket.go index 8185245b5a1a..a6eb2fad4710 100644 --- a/pkg/autoscaler/bucket/bucket.go +++ b/pkg/autoscaler/bucket/bucket.go @@ -45,7 +45,7 @@ func AutoscalerBucketName(ordinal, total uint32) string { // AutoscalerBucketSet returns a hash.BucketSet consisting of Autoscaler // buckets with the given `total` count. func AutoscalerBucketSet(total uint32) *hash.BucketSet { - names := make(sets.String, total) + names := make(sets.Set[string], total) for i := uint32(0); i < total; i++ { names.Insert(AutoscalerBucketName(i, total)) } diff --git a/pkg/autoscaler/metrics/stats_scraper_test.go b/pkg/autoscaler/metrics/stats_scraper_test.go index 4c1892d12792..2fffac4ba570 100644 --- a/pkg/autoscaler/metrics/stats_scraper_test.go +++ b/pkg/autoscaler/metrics/stats_scraper_test.go @@ -639,7 +639,7 @@ func TestOldPodShuffle(t *testing.T) { } // Store and reset. 
firstRun := client.urls - client.urls = sets.NewString() + client.urls = sets.New[string]() _, err = scraper.Scrape(defaultMetric.Spec.StableWindow) if err != nil { @@ -820,7 +820,7 @@ func newTestScrapeClient(stats []Stat, errs []error) *fakeScrapeClient { return &fakeScrapeClient{ stats: stats, errs: errs, - urls: sets.NewString(), + urls: sets.New[string](), } } @@ -828,7 +828,7 @@ type fakeScrapeClient struct { curIdx int stats []Stat errs []error - urls sets.String + urls sets.Set[string] mutex sync.Mutex } diff --git a/pkg/autoscaler/statforwarder/forwarder_test.go b/pkg/autoscaler/statforwarder/forwarder_test.go index 0b44a7972c2a..22942c544fa2 100644 --- a/pkg/autoscaler/statforwarder/forwarder_test.go +++ b/pkg/autoscaler/statforwarder/forwarder_test.go @@ -54,7 +54,7 @@ var ( testHolder1 = "autoscaler-1_" + testIP1 testHolder2 = "autoscaler-2_" + testIP2 testNs = system.Namespace() - testBs = hash.NewBucketSet(sets.NewString(bucket1)) + testBs = hash.NewBucketSet(sets.New(bucket1)) testLease = &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: bucket1, @@ -423,7 +423,7 @@ func TestProcess(t *testing.T) { acceptCh <- acceptCount } os.Setenv("POD_IP", testIP1) - f := New(ctx, hash.NewBucketSet(sets.NewString(bucket1, bucket2))) + f := New(ctx, hash.NewBucketSet(sets.New(bucket1, bucket2))) must(t, LeaseBasedProcessor(ctx, f, accept)) // A Forward without any leadership information should process with retry. diff --git a/pkg/deployment/config.go b/pkg/deployment/config.go index 345f03e9982a..d35cc814111b 100644 --- a/pkg/deployment/config.go +++ b/pkg/deployment/config.go @@ -101,13 +101,13 @@ func defaultConfig() *Config { cfg := &Config{ ProgressDeadline: ProgressDeadlineDefault, DigestResolutionTimeout: digestResolutionTimeoutDefault, - RegistriesSkippingTagResolving: sets.NewString("kind.local", "ko.local", "dev.local"), + RegistriesSkippingTagResolving: sets.New("kind.local", "ko.local", "dev.local"), QueueSidecarCPURequest: &QueueSidecarCPURequestDefault, } // The following code is needed for ConfigMap testing. // defaultConfig must match the example in deployment.yaml which includes: `queue-sidecar-token-audiences: ""` if cfg.QueueSidecarTokenAudiences == nil { - cfg.QueueSidecarTokenAudiences = sets.NewString("") + cfg.QueueSidecarTokenAudiences = sets.New("") } return cfg @@ -179,7 +179,7 @@ type Config struct { QueueSidecarImage string // Repositories for which tag to digest resolving should be skipped. - RegistriesSkippingTagResolving sets.String + RegistriesSkippingTagResolving sets.Set[string] // DigestResolutionTimeout is the maximum time allowed for image digest resolution. DigestResolutionTimeout time.Duration @@ -210,7 +210,7 @@ type Config struct { // QueueSidecarTokenAudiences is a set of strings defining required tokens - each string represent the token audience // used by the queue proxy sidecar container to create tokens for qpoptions. - QueueSidecarTokenAudiences sets.String + QueueSidecarTokenAudiences sets.Set[string] // QueueSidecarRootCA is a root certificate to be trusted by the queue proxy sidecar qpoptions. 
QueueSidecarRootCA string diff --git a/pkg/deployment/config_test.go b/pkg/deployment/config_test.go index ed2108422ded..014042545a3c 100644 --- a/pkg/deployment/config_test.go +++ b/pkg/deployment/config_test.go @@ -40,11 +40,11 @@ func TestMatchingExceptions(t *testing.T) { cfg := defaultConfig() if delta := cfg.RegistriesSkippingTagResolving.Difference(shared.DigestResolutionExceptions); delta.Len() > 0 { - t.Error("Got extra:", delta.List()) + t.Error("Got extra:", sets.List(delta)) } if delta := shared.DigestResolutionExceptions.Difference(cfg.RegistriesSkippingTagResolving); delta.Len() > 0 { - t.Error("Didn't get:", delta.List()) + t.Error("Didn't get:", sets.List(delta)) } } @@ -83,11 +83,11 @@ func TestControllerConfiguration(t *testing.T) { }{{ name: "controller configuration with bad registries", wantConfig: &Config{ - RegistriesSkippingTagResolving: sets.NewString("ko.local", ""), + RegistriesSkippingTagResolving: sets.New("ko.local", ""), DigestResolutionTimeout: digestResolutionTimeoutDefault, QueueSidecarImage: defaultSidecarImage, QueueSidecarCPURequest: &QueueSidecarCPURequestDefault, - QueueSidecarTokenAudiences: sets.NewString("foo", "bar", "boo-srv"), + QueueSidecarTokenAudiences: sets.New("foo", "bar", "boo-srv"), ProgressDeadline: ProgressDeadlineDefault, }, data: map[string]string{ @@ -98,11 +98,11 @@ func TestControllerConfiguration(t *testing.T) { }, { name: "controller configuration good progress deadline", wantConfig: &Config{ - RegistriesSkippingTagResolving: sets.NewString("kind.local", "ko.local", "dev.local"), + RegistriesSkippingTagResolving: sets.New("kind.local", "ko.local", "dev.local"), DigestResolutionTimeout: digestResolutionTimeoutDefault, QueueSidecarImage: defaultSidecarImage, QueueSidecarCPURequest: &QueueSidecarCPURequestDefault, - QueueSidecarTokenAudiences: sets.NewString(""), + QueueSidecarTokenAudiences: sets.New(""), ProgressDeadline: 444 * time.Second, }, data: map[string]string{ @@ -112,11 +112,11 @@ func TestControllerConfiguration(t *testing.T) { }, { name: "controller configuration good digest resolution timeout", wantConfig: &Config{ - RegistriesSkippingTagResolving: sets.NewString("kind.local", "ko.local", "dev.local"), + RegistriesSkippingTagResolving: sets.New("kind.local", "ko.local", "dev.local"), DigestResolutionTimeout: 60 * time.Second, QueueSidecarImage: defaultSidecarImage, QueueSidecarCPURequest: &QueueSidecarCPURequestDefault, - QueueSidecarTokenAudiences: sets.NewString(""), + QueueSidecarTokenAudiences: sets.New(""), ProgressDeadline: ProgressDeadlineDefault, }, data: map[string]string{ @@ -126,11 +126,11 @@ func TestControllerConfiguration(t *testing.T) { }, { name: "controller configuration with registries", wantConfig: &Config{ - RegistriesSkippingTagResolving: sets.NewString("ko.local", "ko.dev"), + RegistriesSkippingTagResolving: sets.New("ko.local", "ko.dev"), DigestResolutionTimeout: digestResolutionTimeoutDefault, QueueSidecarImage: defaultSidecarImage, QueueSidecarCPURequest: &QueueSidecarCPURequestDefault, - QueueSidecarTokenAudiences: sets.NewString(""), + QueueSidecarTokenAudiences: sets.New(""), ProgressDeadline: ProgressDeadlineDefault, }, data: map[string]string{ @@ -140,7 +140,7 @@ func TestControllerConfiguration(t *testing.T) { }, { name: "controller configuration with custom queue sidecar resource request/limits", wantConfig: &Config{ - RegistriesSkippingTagResolving: sets.NewString("kind.local", "ko.local", "dev.local"), + RegistriesSkippingTagResolving: sets.New("kind.local", "ko.local", "dev.local"), 
DigestResolutionTimeout: digestResolutionTimeoutDefault, QueueSidecarImage: defaultSidecarImage, ProgressDeadline: ProgressDeadlineDefault, @@ -150,7 +150,7 @@ func TestControllerConfiguration(t *testing.T) { QueueSidecarCPULimit: quantity("987M"), QueueSidecarMemoryLimit: quantity("654m"), QueueSidecarEphemeralStorageLimit: quantity("321M"), - QueueSidecarTokenAudiences: sets.NewString(""), + QueueSidecarTokenAudiences: sets.New(""), }, data: map[string]string{ QueueSidecarImageKey: defaultSidecarImage, @@ -219,14 +219,14 @@ func TestControllerConfiguration(t *testing.T) { QueueSidecarImage: "1", ProgressDeadline: 2 * time.Second, DigestResolutionTimeout: 3 * time.Second, - RegistriesSkippingTagResolving: sets.NewString("4"), + RegistriesSkippingTagResolving: sets.New("4"), QueueSidecarCPURequest: quantity("5m"), QueueSidecarCPULimit: quantity("6m"), QueueSidecarMemoryRequest: quantity("7M"), QueueSidecarMemoryLimit: quantity("8M"), QueueSidecarEphemeralStorageRequest: quantity("9M"), QueueSidecarEphemeralStorageLimit: quantity("10M"), - QueueSidecarTokenAudiences: sets.NewString(""), + QueueSidecarTokenAudiences: sets.New(""), }, }, { name: "newer key case takes priority", @@ -260,14 +260,14 @@ func TestControllerConfiguration(t *testing.T) { QueueSidecarImage: "12", ProgressDeadline: 13 * time.Second, DigestResolutionTimeout: 14 * time.Second, - RegistriesSkippingTagResolving: sets.NewString("15"), + RegistriesSkippingTagResolving: sets.New("15"), QueueSidecarCPURequest: quantity("16m"), QueueSidecarCPULimit: quantity("17m"), QueueSidecarMemoryRequest: quantity("18M"), QueueSidecarMemoryLimit: quantity("19M"), QueueSidecarEphemeralStorageRequest: quantity("20M"), QueueSidecarEphemeralStorageLimit: quantity("21M"), - QueueSidecarTokenAudiences: sets.NewString("foo"), + QueueSidecarTokenAudiences: sets.New("foo"), }, }} diff --git a/pkg/deployment/zz_generated.deepcopy.go b/pkg/deployment/zz_generated.deepcopy.go index 1493c29edf0b..f7362289a566 100644 --- a/pkg/deployment/zz_generated.deepcopy.go +++ b/pkg/deployment/zz_generated.deepcopy.go @@ -30,7 +30,7 @@ func (in *Config) DeepCopyInto(out *Config) { *out = *in if in.RegistriesSkippingTagResolving != nil { in, out := &in.RegistriesSkippingTagResolving, &out.RegistriesSkippingTagResolving - *out = make(sets.String, len(*in)) + *out = make(sets.Set[string], len(*in)) for key, val := range *in { (*out)[key] = val } @@ -67,7 +67,7 @@ func (in *Config) DeepCopyInto(out *Config) { } if in.QueueSidecarTokenAudiences != nil { in, out := &in.QueueSidecarTokenAudiences, &out.QueueSidecarTokenAudiences - *out = make(sets.String, len(*in)) + *out = make(sets.Set[string], len(*in)) for key, val := range *in { (*out)[key] = val } diff --git a/pkg/reconciler/domainmapping/resources/ingress.go b/pkg/reconciler/domainmapping/resources/ingress.go index 4d0bc75787c6..405a1f24fe59 100644 --- a/pkg/reconciler/domainmapping/resources/ingress.go +++ b/pkg/reconciler/domainmapping/resources/ingress.go @@ -36,7 +36,7 @@ import ( // KIngress). The created ingress will contain a RewriteHost rule to cause the // given hostName to be used as the host. 
func MakeIngress(dm *servingv1beta1.DomainMapping, backendServiceName, hostName, ingressClass string, httpOption netv1alpha1.HTTPOption, tls []netv1alpha1.IngressTLS, acmeChallenges ...netv1alpha1.HTTP01Challenge) *netv1alpha1.Ingress { - paths, hosts := routeresources.MakeACMEIngressPaths(acmeChallenges, sets.NewString(dm.GetName())) + paths, hosts := routeresources.MakeACMEIngressPaths(acmeChallenges, sets.New(dm.GetName())) return &netv1alpha1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: kmeta.ChildName(dm.GetName(), ""), diff --git a/pkg/reconciler/labeler/accessors.go b/pkg/reconciler/labeler/accessors.go index 3e982c6b7dd7..bd30b793e87c 100644 --- a/pkg/reconciler/labeler/accessors.go +++ b/pkg/reconciler/labeler/accessors.go @@ -219,12 +219,12 @@ func (c *configurationAccessor) list(ns, routeName string, state v1.RoutingState // GetListAnnValue finds a given value in a comma-separated annotation. // returns the entire annotation value and true if found. -func GetListAnnValue(annotations map[string]string, key string) sets.String { +func GetListAnnValue(annotations map[string]string, key string) sets.Set[string] { l := annotations[key] if l == "" { - return sets.String{} + return sets.Set[string]{} } - return sets.NewString(strings.Split(l, ",")...) + return sets.New(strings.Split(l, ",")...) } // patch implements Accessor diff --git a/pkg/reconciler/labeler/metasync.go b/pkg/reconciler/labeler/metasync.go index 3e0085a73bc3..fab34d95d264 100644 --- a/pkg/reconciler/labeler/metasync.go +++ b/pkg/reconciler/labeler/metasync.go @@ -33,8 +33,8 @@ import ( // syncRoutingMeta makes sure that the revisions and configurations referenced from // a Route are labeled with the routingState label and routes annotation. func syncRoutingMeta(ctx context.Context, r *v1.Route, cacc *configurationAccessor, racc *revisionAccessor) error { - revisions := sets.NewString() - configs := sets.NewString() + revisions := sets.New[string]() + configs := sets.New[string]() // Walk the Route's .status.traffic and .spec.traffic and build a list // of revisions and configurations to label @@ -110,7 +110,7 @@ func clearRoutingMeta(ctx context.Context, r *v1.Route, accs ...accessor) error // setMetaForListed uses the accessor to attach the label for this route to every element // listed within "names" in the same namespace. -func setMetaForListed(ctx context.Context, route *v1.Route, acc accessor, names sets.String) error { +func setMetaForListed(ctx context.Context, route *v1.Route, acc accessor, names sets.Set[string]) error { for name := range names { if err := setRoutingMeta(ctx, acc, route, name, false); err != nil { return fmt.Errorf("failed to add route annotation to Namespace=%s Name=%q: %w", route.Namespace, name, err) @@ -122,7 +122,7 @@ func setMetaForListed(ctx context.Context, route *v1.Route, acc accessor, names // clearMetaForNotListed uses the accessor to delete the label from any listable entity that is // not named within our list. Unlike setMetaForListed, this function takes ns/name instead of a // Route so that it can clean things up when a Route ceases to exist. 
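// A minimal sketch of the GetListAnnValue pattern above. Note the empty-string
// guard: strings.Split("", ",") yields [""], so without it the result would be
// a one-element set containing "" rather than an empty set. The annotation key
// is illustrative.
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

func listAnnValue(annotations map[string]string, key string) sets.Set[string] {
	l := annotations[key]
	if l == "" {
		return sets.Set[string]{}
	}
	return sets.New(strings.Split(l, ",")...)
}

func main() {
	ann := map[string]string{"example.dev/routes": "route-b,route-a"}
	fmt.Println(sets.List(listAnnValue(ann, "example.dev/routes"))) // [route-a route-b]
}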
-func clearMetaForNotListed(ctx context.Context, r *v1.Route, acc accessor, names sets.String) error { +func clearMetaForNotListed(ctx context.Context, r *v1.Route, acc accessor, names sets.Set[string]) error { oldList, err := acc.list(r.Namespace, r.Name, v1.RoutingStateActive) if err != nil { return err diff --git a/pkg/reconciler/nscert/nscert.go b/pkg/reconciler/nscert/nscert.go index daafb0c90fed..c9e8d913faaa 100644 --- a/pkg/reconciler/nscert/nscert.go +++ b/pkg/reconciler/nscert/nscert.go @@ -183,7 +183,7 @@ func wildcardDomain(tmpl, domain, namespace string) (string, error) { func findMatchingCert(domain string, certs []*v1alpha1.Certificate) *v1alpha1.Certificate { for _, cert := range certs { - if dnsNames := sets.NewString(cert.Spec.DNSNames...); dnsNames.Has(domain) { + if dnsNames := sets.New(cert.Spec.DNSNames...); dnsNames.Has(domain) { return cert } } diff --git a/pkg/reconciler/revision/background.go b/pkg/reconciler/revision/background.go index 8afa51bc8442..0bef35cba5ff 100644 --- a/pkg/reconciler/revision/background.go +++ b/pkg/reconciler/revision/background.go @@ -32,7 +32,7 @@ import ( // imageResolver is an interface used mostly to mock digestResolver for tests. type imageResolver interface { - Resolve(ctx context.Context, image string, opt k8schain.Options, registriesToSkip sets.String) (string, error) + Resolve(ctx context.Context, image string, opt k8schain.Options, registriesToSkip sets.Set[string]) (string, error) } // backgroundResolver performs background downloads of image digests. @@ -53,7 +53,7 @@ type backgroundResolver struct { type resolveResult struct { // these fields are immutable after creation, so can be accessed without a lock. opt k8schain.Options - registriesToSkip sets.String + registriesToSkip sets.Set[string] completionCallback func() workItems []workItem @@ -64,7 +64,7 @@ type resolveResult struct { imagesResolved map[string]string // imagesToBeResolved keeps unique image names so we can quickly compare with the current number of resolved ones - imagesToBeResolved sets.String + imagesToBeResolved sets.Set[string] err error } @@ -143,7 +143,7 @@ func (r *backgroundResolver) Start(stop <-chan struct{}, maxInFlight int) (done // If this method returns `nil, nil` this implies a resolve was triggered or is // already in progress, so the reconciler should exit and wait for the revision // to be re-enqueued when the result is ready. -func (r *backgroundResolver) Resolve(logger *zap.SugaredLogger, rev *v1.Revision, opt k8schain.Options, registriesToSkip sets.String, timeout time.Duration) (initContainerStatuses []v1.ContainerStatus, statuses []v1.ContainerStatus, error error) { +func (r *backgroundResolver) Resolve(logger *zap.SugaredLogger, rev *v1.Revision, opt k8schain.Options, registriesToSkip sets.Set[string], timeout time.Duration) (initContainerStatuses []v1.ContainerStatus, statuses []v1.ContainerStatus, error error) { r.mu.Lock() defer r.mu.Unlock() @@ -194,13 +194,13 @@ func (r *backgroundResolver) Resolve(logger *zap.SugaredLogger, rev *v1.Revision // addWorkItems adds a digest resolve item to the queue for each container in the revision. // This is expected to be called with the mutex locked. 
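// A minimal sketch of the lookup in findMatchingCert above: building a
// throwaway set from the certificate's DNS names turns the match into a
// single Has call. Values are illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	certDNSNames := []string{"*.default.example.com", "*.serving.example.com"}
	domain := "*.default.example.com"

	if sets.New(certDNSNames...).Has(domain) {
		fmt.Println("found matching cert for", domain)
	}
}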
-func (r *backgroundResolver) addWorkItems(rev *v1.Revision, name types.NamespacedName, opt k8schain.Options, registriesToSkip sets.String, timeout time.Duration) { +func (r *backgroundResolver) addWorkItems(rev *v1.Revision, name types.NamespacedName, opt k8schain.Options, registriesToSkip sets.Set[string], timeout time.Duration) { totalNumOfContainers := len(rev.Spec.Containers) + len(rev.Spec.InitContainers) r.results[name] = &resolveResult{ opt: opt, registriesToSkip: registriesToSkip, imagesResolved: make(map[string]string), - imagesToBeResolved: sets.String{}, + imagesToBeResolved: sets.Set[string]{}, workItems: make([]workItem, 0, totalNumOfContainers), completionCallback: func() { r.enqueue(name) diff --git a/pkg/reconciler/revision/background_test.go b/pkg/reconciler/revision/background_test.go index 3e6d048fad8e..8310b36a9876 100644 --- a/pkg/reconciler/revision/background_test.go +++ b/pkg/reconciler/revision/background_test.go @@ -52,7 +52,7 @@ func TestResolveInBackground(t *testing.T) { wantError error }{{ name: "success", - resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { return img + "-digest", nil }, wantStatuses: []v1.ContainerStatus{{ @@ -68,8 +68,8 @@ func TestResolveInBackground(t *testing.T) { }}, }, { name: "passing params", - resolver: func(_ context.Context, img string, opt k8schain.Options, skip sets.String) (string, error) { - return fmt.Sprintf("%s-%s-%s", img, opt.ServiceAccountName, skip.List()[0]), nil + resolver: func(_ context.Context, img string, opt k8schain.Options, skip sets.Set[string]) (string, error) { + return fmt.Sprintf("%s-%s-%s", img, opt.ServiceAccountName, sets.List(skip)[0]), nil }, wantStatuses: []v1.ContainerStatus{{ Name: "first", @@ -84,7 +84,7 @@ func TestResolveInBackground(t *testing.T) { }}, }, { name: "one slow resolve", - resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { if img == "first-image" { // make the first resolve arrive after the second. 
time.Sleep(50 * time.Millisecond) @@ -104,13 +104,13 @@ func TestResolveInBackground(t *testing.T) { }}, }, { name: "resolver entirely fails", - resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { return img + "-digest", errDigest }, wantError: errDigest, }, { name: "resolver fails one image", - resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + resolver: func(_ context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { if img == "second-image" { return "", errDigest } @@ -121,7 +121,7 @@ func TestResolveInBackground(t *testing.T) { }, { name: "timeout", timeout: ptr.Duration(10 * time.Millisecond), - resolver: func(ctx context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + resolver: func(ctx context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { if img == "second-image" { select { case <-time.After(10 * time.Second): @@ -161,7 +161,7 @@ func TestResolveInBackground(t *testing.T) { for i := 0; i < 2; i++ { t.Run(fmt.Sprint("iteration", i), func(t *testing.T) { logger := logtesting.TestLogger(t) - initContainerStatuses, statuses, err := subject.Resolve(logger, fakeRevision, k8schain.Options{ServiceAccountName: "san"}, sets.NewString("skip"), timeout) + initContainerStatuses, statuses, err := subject.Resolve(logger, fakeRevision, k8schain.Options{ServiceAccountName: "san"}, sets.New("skip"), timeout) if err != nil || statuses != nil || initContainerStatuses != nil { // Initial result should be nil, nil, nil since we have nothing in cache. t.Errorf("Resolve() = %v, %v %v, wanted nil, nil, nil", statuses, initContainerStatuses, err) @@ -201,7 +201,7 @@ func TestResolveInBackground(t *testing.T) { func TestRateLimitPerItem(t *testing.T) { logger := logtesting.TestLogger(t) - var resolver resolveFunc = func(_ context.Context, img string, _ k8schain.Options, _ sets.String) (string, error) { + var resolver resolveFunc = func(_ context.Context, img string, _ k8schain.Options, _ sets.Set[string]) (string, error) { if img == "img1" || img == "init" { return "", nil } @@ -229,14 +229,14 @@ func TestRateLimitPerItem(t *testing.T) { for i := 0; i < 3; i++ { subject.Clear(types.NamespacedName{Name: revision.Name, Namespace: revision.Namespace}) start := time.Now() - initResolution, resolution, err := subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.NewString("skip"), 0) + initResolution, resolution, err := subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.New("skip"), 0) if err != nil || resolution != nil || initResolution != nil { t.Fatalf("Expected Resolve to be nil, nil, nil but got %v, %v, %v", resolution, initResolution, err) } <-enqueue - _, _, err = subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.NewString("skip"), 0) + _, _, err = subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.New("skip"), 0) if err == nil { t.Fatalf("Expected Resolve to fail") } @@ -251,7 +251,7 @@ func TestRateLimitPerItem(t *testing.T) { t.Run("Does not affect other revisions", func(t *testing.T) { start := time.Now() - _, resolution, err := subject.Resolve(logger, rev("another-revision", "img1", "img2"), k8schain.Options{ServiceAccountName: "san"}, sets.NewString("skip"), 0) + _, resolution, err := 
subject.Resolve(logger, rev("another-revision", "img1", "img2"), k8schain.Options{ServiceAccountName: "san"}, sets.New("skip"), 0) if err != nil || resolution != nil { t.Fatalf("Expected Resolve to be nil, nil but got %v, %v", resolution, err) } @@ -266,7 +266,7 @@ func TestRateLimitPerItem(t *testing.T) { subject.Forget(types.NamespacedName{Name: revision.Name, Namespace: revision.Namespace}) start := time.Now() - _, resolution, err := subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.NewString("skip"), 0) + _, resolution, err := subject.Resolve(logger, revision, k8schain.Options{ServiceAccountName: "san"}, sets.New("skip"), 0) if err != nil || resolution != nil { t.Fatalf("Expected Resolve to be nil, nil but got %v, %v", resolution, err) } @@ -279,9 +279,9 @@ func TestRateLimitPerItem(t *testing.T) { }) } -type resolveFunc func(context.Context, string, k8schain.Options, sets.String) (string, error) +type resolveFunc func(context.Context, string, k8schain.Options, sets.Set[string]) (string, error) -func (r resolveFunc) Resolve(c context.Context, s string, o k8schain.Options, t sets.String) (string, error) { +func (r resolveFunc) Resolve(c context.Context, s string, o k8schain.Options, t sets.Set[string]) (string, error) { return r(c, s, o, t) } diff --git a/pkg/reconciler/revision/resolve.go b/pkg/reconciler/revision/resolve.go index 1abb11e61a83..16aeeae8663e 100644 --- a/pkg/reconciler/revision/resolve.go +++ b/pkg/reconciler/revision/resolve.go @@ -91,7 +91,7 @@ func (r *digestResolver) Resolve( ctx context.Context, image string, opt k8schain.Options, - registriesToSkip sets.String) (string, error) { + registriesToSkip sets.Set[string]) (string, error) { kc, err := k8schain.New(ctx, r.client, opt) if err != nil { return "", fmt.Errorf("failed to initialize authentication: %w", err) diff --git a/pkg/reconciler/revision/resolve_test.go b/pkg/reconciler/revision/resolve_test.go index 221587774b3b..ac1942bed380 100644 --- a/pkg/reconciler/revision/resolve_test.go +++ b/pkg/reconciler/revision/resolve_test.go @@ -45,7 +45,7 @@ import ( fakeclient "k8s.io/client-go/kubernetes/fake" ) -var emptyRegistrySet = sets.NewString() +var emptyRegistrySet = sets.New[string]() func mustDigest(t *testing.T, img v1.Image) v1.Hash { h, err := img.Digest() @@ -432,7 +432,7 @@ func TestResolveSkippingRegistry(t *testing.T) { transport: http.DefaultTransport, } - registriesToSkip := sets.NewString("localhost:5000") + registriesToSkip := sets.New("localhost:5000") opt := k8schain.Options{ Namespace: ns, diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index a668d5efe1d3..008750bb7272 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -1314,7 +1314,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "qpoption tokens", dc: deployment.Config{ - QueueSidecarTokenAudiences: sets.NewString("boo-srv"), + QueueSidecarTokenAudiences: sets.New("boo-srv"), }, rev: revision("bar", "foo", withContainers([]corev1.Container{{ diff --git a/pkg/reconciler/revision/resources/meta.go b/pkg/reconciler/revision/resources/meta.go index 42d035f7dc8e..6ddf78874ce8 100644 --- a/pkg/reconciler/revision/resources/meta.go +++ b/pkg/reconciler/revision/resources/meta.go @@ -25,12 +25,12 @@ import ( ) var ( - excludeLabels = sets.NewString( + excludeLabels = sets.New( serving.RouteLabelKey, serving.RoutingStateLabelKey, ) - excludeAnnotations = sets.NewString( + 
excludeAnnotations = sets.New( serving.RevisionLastPinnedAnnotationKey, serving.RevisionPreservedAnnotationKey, serving.RoutingStateModifiedAnnotationKey, diff --git a/pkg/reconciler/revision/revision.go b/pkg/reconciler/revision/revision.go index d4228b8a8838..8a58774a53f1 100644 --- a/pkg/reconciler/revision/revision.go +++ b/pkg/reconciler/revision/revision.go @@ -45,7 +45,7 @@ import ( ) type resolver interface { - Resolve(*zap.SugaredLogger, *v1.Revision, k8schain.Options, sets.String, time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) + Resolve(*zap.SugaredLogger, *v1.Revision, k8schain.Options, sets.Set[string], time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) Clear(types.NamespacedName) Forget(types.NamespacedName) } diff --git a/pkg/reconciler/revision/revision_test.go b/pkg/reconciler/revision/revision_test.go index 462cff4e8581..d633a5863adf 100644 --- a/pkg/reconciler/revision/revision_test.go +++ b/pkg/reconciler/revision/revision_test.go @@ -228,7 +228,7 @@ func addResourcesToInformers(t *testing.T, ctx context.Context, rev *v1.Revision type nopResolver struct{} -func (r *nopResolver) Resolve(_ *zap.SugaredLogger, rev *v1.Revision, _ k8schain.Options, _ sets.String, _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { +func (r *nopResolver) Resolve(_ *zap.SugaredLogger, rev *v1.Revision, _ k8schain.Options, _ sets.Set[string], _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { status := []v1.ContainerStatus{{ Name: rev.Spec.Containers[0].Name, }} @@ -339,7 +339,7 @@ func testDefaultsCM() *corev1.ConfigMap { type notResolvedYetResolver struct{} -func (r *notResolvedYetResolver) Resolve(_ *zap.SugaredLogger, _ *v1.Revision, _ k8schain.Options, _ sets.String, _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { +func (r *notResolvedYetResolver) Resolve(_ *zap.SugaredLogger, _ *v1.Revision, _ k8schain.Options, _ sets.Set[string], _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { return nil, nil, nil } @@ -351,7 +351,7 @@ type errorResolver struct { cleared bool } -func (r *errorResolver) Resolve(_ *zap.SugaredLogger, _ *v1.Revision, _ k8schain.Options, _ sets.String, _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { +func (r *errorResolver) Resolve(_ *zap.SugaredLogger, _ *v1.Revision, _ k8schain.Options, _ sets.Set[string], _ time.Duration) ([]v1.ContainerStatus, []v1.ContainerStatus, error) { return nil, nil, r.err } diff --git a/pkg/reconciler/route/reconcile_resources.go b/pkg/reconciler/route/reconcile_resources.go index a27d11617844..c028be31546c 100644 --- a/pkg/reconciler/route/reconcile_resources.go +++ b/pkg/reconciler/route/reconcile_resources.go @@ -111,7 +111,7 @@ func (c *Reconciler) reconcileIngress( func (c *Reconciler) deleteOrphanedServices(ctx context.Context, r *v1.Route, activeServices []resources.ServicePair) error { ns := r.Namespace - active := make(sets.String, len(activeServices)) + active := make(sets.Set[string], len(activeServices)) for _, service := range activeServices { active.Insert(service.Service.Name) @@ -154,7 +154,7 @@ func (c *Reconciler) reconcilePlaceholderServices(ctx context.Context, route *v1 recorder := controller.GetEventRecorder(ctx) ns := route.Namespace services := make([]resources.ServicePair, 0, len(targets)) - names := make(sets.String, len(targets)) + names := make(sets.Set[string], len(targets)) // Note: this is done in order for the tests to be // deterministic since they assert creations in 
order @@ -162,7 +162,7 @@ func (c *Reconciler) reconcilePlaceholderServices(ctx context.Context, route *v1 names.Insert(name) } - for _, name := range names.List() { + for _, name := range sets.List(names) { desiredService, err := resources.MakeK8sPlaceholderService(ctx, route, name) if err != nil { return nil, fmt.Errorf("failed to construct placeholder k8s service: %w", err) diff --git a/pkg/reconciler/route/resources/filters.go b/pkg/reconciler/route/resources/filters.go index ff4601b064f3..48bff0491084 100644 --- a/pkg/reconciler/route/resources/filters.go +++ b/pkg/reconciler/route/resources/filters.go @@ -30,7 +30,7 @@ func IsClusterLocalService(svc *corev1.Service) bool { // ExcludedAnnotations is the set of annotations that should not propagate to the // Ingress or Certificate Resources -var ExcludedAnnotations = sets.NewString( +var ExcludedAnnotations = sets.New[string]( corev1.LastAppliedConfigAnnotation, // We're probably never going to drop support for the older annotation casing (camelCase) diff --git a/pkg/reconciler/route/resources/ingress.go b/pkg/reconciler/route/resources/ingress.go index 4d866c5ea846..862a8568d1fd 100644 --- a/pkg/reconciler/route/resources/ingress.go +++ b/pkg/reconciler/route/resources/ingress.go @@ -205,7 +205,7 @@ func makeIngressSpec( }, nil } -func routeDomain(ctx context.Context, targetName string, r *servingv1.Route, visibility netv1alpha1.IngressVisibility) (sets.String, error) { +func routeDomain(ctx context.Context, targetName string, r *servingv1.Route, visibility netv1alpha1.IngressVisibility) (sets.Set[string], error) { hostname, err := domains.HostnameFromTemplate(ctx, r.Name, targetName) if err != nil { return nil, err @@ -221,14 +221,14 @@ func routeDomain(ctx context.Context, targetName string, r *servingv1.Route, vis } domains := []string{domain} if isClusterLocal { - domains = ingress.ExpandedHosts(sets.NewString(domains...)).List() + domains = sets.List(ingress.ExpandedHosts(sets.New(domains...))) } - return sets.NewString(domains...), err + return sets.New(domains...), err } // MakeACMEIngressPaths returns a set of netv1alpha1.HTTPIngressPath // that can be used to perform ACME challenges. -func MakeACMEIngressPaths(acmeChallenges []netv1alpha1.HTTP01Challenge, domains sets.String) ([]netv1alpha1.HTTPIngressPath, []string) { +func MakeACMEIngressPaths(acmeChallenges []netv1alpha1.HTTP01Challenge, domains sets.Set[string]) ([]netv1alpha1.HTTPIngressPath, []string) { paths := make([]netv1alpha1.HTTPIngressPath, 0, len(acmeChallenges)) var extraHosts []string @@ -252,13 +252,13 @@ func MakeACMEIngressPaths(acmeChallenges []netv1alpha1.HTTP01Challenge, domains return paths, extraHosts } -func makeIngressRule(domains sets.String, ns string, +func makeIngressRule(domains sets.Set[string], ns string, visibility netv1alpha1.IngressVisibility, targets traffic.RevisionTargets, roCfgs []*traffic.ConfigurationRollout, encryption bool) netv1alpha1.IngressRule { return netv1alpha1.IngressRule{ - Hosts: domains.List(), + Hosts: sets.List(domains), Visibility: visibility, HTTP: &netv1alpha1.HTTPIngressRuleValue{ Paths: []netv1alpha1.HTTPIngressPath{ diff --git a/pkg/reconciler/route/resources/ingress_test.go b/pkg/reconciler/route/resources/ingress_test.go index f268ba72f2d6..54919c2beea8 100644 --- a/pkg/reconciler/route/resources/ingress_test.go +++ b/pkg/reconciler/route/resources/ingress_test.go @@ -854,7 +854,7 @@ func TestMakeIngressSpecCorrectRulesWithTagBasedRouting(t *testing.T) { // One active target. 
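// A minimal sketch of how an exclusion set such as ExcludedAnnotations above
// is typically consumed when propagating metadata to a child resource; the
// filtering helper and keys are illustrative, not part of the patch.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

var excluded = sets.New[string](
	"kubectl.kubernetes.io/last-applied-configuration",
)

func filterAnnotations(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		if excluded.Has(k) {
			continue // drop annotations that must not propagate
		}
		out[k] = v
	}
	return out
}

func main() {
	fmt.Println(filterAnnotations(map[string]string{
		"kubectl.kubernetes.io/last-applied-configuration": "{...}",
		"serving.knative.dev/creator":                      "someone",
	}))
}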
func TestMakeIngressRuleVanilla(t *testing.T) { - domains := sets.NewString("a.com", "b.org") + domains := sets.New("a.com", "b.org") targets := traffic.RevisionTargets{{ TrafficTarget: v1.TrafficTarget{ ConfigurationName: "config", @@ -914,7 +914,7 @@ func TestMakeIngressRuleZeroPercentTarget(t *testing.T) { Percent: ptr.Int64(0), }, }} - domains := sets.NewString("test.org") + domains := sets.New("test.org") tc := &traffic.Config{ Targets: map[string]traffic.RevisionTargets{ traffic.DefaultTarget: targets, @@ -970,7 +970,7 @@ func TestMakeIngressRuleTwoTargets(t *testing.T) { }, } ro := tc.BuildRollout() - domains := sets.NewString("test.org") + domains := sets.New("test.org") rule := makeIngressRule(domains, ns, netv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag("a-tag"), false /* internal encryption */) expected := netv1alpha1.IngressRule{ diff --git a/pkg/reconciler/route/route.go b/pkg/reconciler/route/route.go index 22b170c56a53..3a2a6a9403d6 100644 --- a/pkg/reconciler/route/route.go +++ b/pkg/reconciler/route/route.go @@ -218,7 +218,7 @@ func (c *Reconciler) tls(ctx context.Context, host string, r *v1.Route, traffic acmeChallenges := []netv1alpha1.HTTP01Challenge{} desiredCerts := resources.MakeCertificates(r, domainToTagMap, certClass(ctx, r), domain) for _, desiredCert := range desiredCerts { - dnsNames := sets.NewString(desiredCert.Spec.DNSNames...) + dnsNames := sets.New(desiredCert.Spec.DNSNames...) // Look for a matching wildcard cert before provisioning a new one. This saves the // the time required to provision a new cert and reduces the chances of hitting the // Let's Encrypt API rate limits. @@ -234,7 +234,7 @@ func (c *Reconciler) tls(ctx context.Context, host string, r *v1.Route, traffic } return nil, nil, err } - dnsNames = sets.NewString(cert.Spec.DNSNames...) + dnsNames = sets.New(cert.Spec.DNSNames...) } // r.Status.URL is for the major domain, so only change if the cert is for @@ -244,7 +244,7 @@ func (c *Reconciler) tls(ctx context.Context, host string, r *v1.Route, traffic } // TODO: we should only mark https for the public visible targets when // we are able to configure visibility per target. - setTargetsScheme(&r.Status, dnsNames.List(), "https") + setTargetsScheme(&r.Status, sets.List(dnsNames), "https") if cert.IsReady() { if renewingCondition := cert.Status.GetCondition("Renewing"); renewingCondition != nil { @@ -262,7 +262,7 @@ func (c *Reconciler) tls(ctx context.Context, host string, r *v1.Route, traffic } } r.Status.MarkCertificateReady(cert.Name) - tls = append(tls, resources.MakeIngressTLS(cert, dnsNames.List())) + tls = append(tls, resources.MakeIngressTLS(cert, sets.List(dnsNames))) } else { acmeChallenges = append(acmeChallenges, cert.Status.HTTP01Challenges...) 
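// A minimal sketch of the conversion done in makeIngressRule and the TLS code
// above: sets keep host and DNS-name handling duplicate-free, and sets.List
// produces the sorted []string that the API structs expect. The rule struct
// here is an illustrative stand-in for netv1alpha1.IngressRule.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type ingressRule struct {
	Hosts []string
}

func makeRule(domains sets.Set[string]) ingressRule {
	// Convert to a slice only at the API boundary, keeping it sorted so the
	// reconciler produces stable, diff-friendly objects.
	return ingressRule{Hosts: sets.List(domains)}
}

func main() {
	fmt.Println(makeRule(sets.New("b.org", "a.com")).Hosts) // [a.com b.org]
}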
r.Status.MarkCertificateNotReady(cert) @@ -276,7 +276,7 @@ func (c *Reconciler) tls(ctx context.Context, host string, r *v1.Route, traffic Host: host, } } - setTargetsScheme(&r.Status, dnsNames.List(), "http") + setTargetsScheme(&r.Status, sets.List(dnsNames), "http") r.Status.MarkHTTPDowngrade(cert.Name) } } @@ -520,7 +520,7 @@ func findMatchingWildcardCert(ctx context.Context, domains []string, certs []*ne } func wildcardCertMatches(ctx context.Context, domains []string, cert *netv1alpha1.Certificate) bool { - dnsNames := make(sets.String, len(cert.Spec.DNSNames)) + dnsNames := make(sets.Set[string], len(cert.Spec.DNSNames)) logger := logging.FromContext(ctx) for _, dns := range cert.Spec.DNSNames { diff --git a/pkg/reconciler/route/visibility/visibility.go b/pkg/reconciler/route/visibility/visibility.go index 1f357485feff..a71edade3490 100644 --- a/pkg/reconciler/route/visibility/visibility.go +++ b/pkg/reconciler/route/visibility/visibility.go @@ -71,8 +71,8 @@ func (b *Resolver) routeVisibility(ctx context.Context, route *v1.Route) netv1al return netv1alpha1.IngressVisibilityExternalIP } -func trafficNames(route *v1.Route) sets.String { - names := sets.NewString(traffic.DefaultTarget) +func trafficNames(route *v1.Route) sets.Set[string] { + names := sets.New(traffic.DefaultTarget) for _, tt := range route.Spec.Traffic { names.Insert(tt.Tag) } diff --git a/pkg/reconciler/serverlessservice/serverlessservice.go b/pkg/reconciler/serverlessservice/serverlessservice.go index 1eaefe23232e..d1a216cd9c0b 100644 --- a/pkg/reconciler/serverlessservice/serverlessservice.go +++ b/pkg/reconciler/serverlessservice/serverlessservice.go @@ -138,7 +138,7 @@ func subsetEndpoints(eps *corev1.Endpoints, target string, n int) *corev1.Endpoi return eps } - addrs := make(sets.String, len(eps.Subsets[0].Addresses)) + addrs := make(sets.Set[string], len(eps.Subsets[0].Addresses)) for _, ss := range eps.Subsets { for _, addr := range ss.Addresses { addrs.Insert(addr.IP) diff --git a/test/conformance/api/shared/util.go b/test/conformance/api/shared/util.go index 08e71cd63883..23f5e5ec83b2 100644 --- a/test/conformance/api/shared/util.go +++ b/test/conformance/api/shared/util.go @@ -39,7 +39,7 @@ import ( // associated with images that aren't actually published to a registry, but // side-loaded into the cluster's container daemon via an operation like // `docker load` or `kind load`. -var DigestResolutionExceptions = sets.NewString("kind.local", "ko.local", "dev.local") +var DigestResolutionExceptions = sets.New("kind.local", "ko.local", "dev.local") // ValidateImageDigest validates the image digest. 
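// A minimal sketch of the accumulation pattern in subsetEndpoints and
// wildcardCertMatches above: make(sets.Set[string], n) pre-sizes the
// underlying map, and Insert silently de-duplicates repeated values such as
// the same IP appearing in several subsets. Addresses are illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	subsets := [][]string{
		{"10.0.0.1", "10.0.0.2"},
		{"10.0.0.2", "10.0.0.3"}, // 10.0.0.2 repeats across subsets
	}

	addrs := make(sets.Set[string], len(subsets[0]))
	for _, ss := range subsets {
		addrs.Insert(ss...)
	}
	fmt.Println(addrs.Len(), sets.List(addrs)) // 3 [10.0.0.1 10.0.0.2 10.0.0.3]
}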
func ValidateImageDigest(t *testing.T, imageName string, imageDigest string) (bool, error) { diff --git a/test/conformance/runtime/sysctl_test.go b/test/conformance/runtime/sysctl_test.go index 00900276d4c9..49d9a05593dd 100644 --- a/test/conformance/runtime/sysctl_test.go +++ b/test/conformance/runtime/sysctl_test.go @@ -50,7 +50,7 @@ func TestShouldHaveSysctlReadOnly(t *testing.T) { if got, want := mount.Device, "proc"; got != want { t.Errorf("%s has mount.Device = %s, wanted: %s", mount.Path, mount.Device, want) } - if !sets.NewString(mount.Options...).Has("ro") { + if !sets.New(mount.Options...).Has("ro") { t.Errorf("%s has mount.Options = %v, wanted: ro", mount.Path, mount.Options) } } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 01a1a4b52d12..20e1fbf66356 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -95,7 +95,7 @@ func WaitForScaleToZero(t *testing.T, deploymentName string, clients *test.Clien // waitForActivatorEndpoints waits for the Service endpoints to match that of activator. func waitForActivatorEndpoints(ctx *TestContext) error { var ( - aset, svcSet sets.String + aset, svcSet sets.Set[string] wantAct int ) @@ -118,13 +118,13 @@ func waitForActivatorEndpoints(ctx *TestContext) error { } wantAct = int(sks.Spec.NumActivators) - aset = make(sets.String, wantAct) + aset = make(sets.Set[string], wantAct) for _, ss := range actEps.Subsets { for i := 0; i < len(ss.Addresses); i++ { aset.Insert(ss.Addresses[i].IP) } } - svcSet = make(sets.String, wantAct) + svcSet = make(sets.Set[string], wantAct) for _, ss := range svcEps.Subsets { for i := 0; i < len(ss.Addresses); i++ { svcSet.Insert(ss.Addresses[i].IP) @@ -144,7 +144,7 @@ func waitForActivatorEndpoints(ctx *TestContext) error { ctx.t.Logf("Did not see activator endpoints in public service for %s."+ "Last received values: Activator: %v "+ "PubSvc: %v, WantActivators %d", - ctx.resources.Revision.Name, aset.List(), svcSet.List(), wantAct) + ctx.resources.Revision.Name, sets.List(aset), sets.List(svcSet), wantAct) return rerr } return nil diff --git a/test/e2e/image_pull_error_test.go b/test/e2e/image_pull_error_test.go index 3bfdf801390f..efeb5aaebed3 100644 --- a/test/e2e/image_pull_error_test.go +++ b/test/e2e/image_pull_error_test.go @@ -71,7 +71,7 @@ func TestImagePullError(t *testing.T) { } t.Log("When the images are not pulled, the revision should have error status.") - wantRevReasons := sets.NewString("ImagePullBackOff", "ErrImagePull") + wantRevReasons := sets.New("ImagePullBackOff", "ErrImagePull") if err := v1test.CheckRevisionState(clients.ServingClient, revisionName, func(r *v1.Revision) (bool, error) { cond := r.Status.GetCondition(v1.RevisionConditionReady) if cond != nil { diff --git a/test/ha/autoscaler_test.go b/test/ha/autoscaler_test.go index 82b771d5a513..0a6bdc36c663 100644 --- a/test/ha/autoscaler_test.go +++ b/test/ha/autoscaler_test.go @@ -70,7 +70,7 @@ func TestAutoscalerHA(t *testing.T) { t.Fatalf("Deployment %s not scaled to %d: %v", autoscalerDeploymentName, test.ServingFlags.Replicas, err) } - leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, autoscalerDeploymentName, system.Namespace(), sets.NewString(), test.ServingFlags.Buckets) + leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, autoscalerDeploymentName, system.Namespace(), sets.New[string](), test.ServingFlags.Buckets) if err != nil { t.Fatal("Failed to get leader:", err) } @@ -81,7 +81,7 @@ func TestAutoscalerHA(t *testing.T) { t.Fatal("Failed to scale to zero:", 
err) } - for _, leader := range leaders.List() { + for _, leader := range sets.List(leaders) { if err := clients.KubeClient.CoreV1().Pods(system.Namespace()).Delete(context.Background(), leader, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { t.Fatalf("Failed to delete pod %s: %v", leader, err) diff --git a/test/ha/autoscalerhpa_test.go b/test/ha/autoscalerhpa_test.go index 7298a8d5fbc3..98c43a31c3a1 100644 --- a/test/ha/autoscalerhpa_test.go +++ b/test/ha/autoscalerhpa_test.go @@ -47,7 +47,7 @@ func TestAutoscalerHPAHANewRevision(t *testing.T) { } // TODO(mattmoor): Once we switch to the new sharded leader election, we should use more than a single bucket here, but the test is still interesting. - leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), sets.NewString(), test.ServingFlags.Buckets) + leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), sets.New[string](), test.ServingFlags.Buckets) if err != nil { t.Fatal("Failed to get leader:", err) } @@ -62,7 +62,7 @@ func TestAutoscalerHPAHANewRevision(t *testing.T) { test.EnsureTearDown(t, clients, &names) - for _, leader := range leaders.List() { + for _, leader := range sets.List(leaders) { if err := clients.KubeClient.CoreV1().Pods(system.Namespace()).Delete(context.Background(), leader, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { t.Fatalf("Failed to delete pod %s: %v", leader, err) diff --git a/test/ha/controller_test.go b/test/ha/controller_test.go index 1c5d06aba37f..34ec1bddb9f9 100644 --- a/test/ha/controller_test.go +++ b/test/ha/controller_test.go @@ -46,7 +46,7 @@ func TestControllerHA(t *testing.T) { } // TODO(mattmoor): Once we switch to the new sharded leader election, we should use more than a single bucket here, but the test is still interesting. 
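// A minimal sketch of two details in the HA test hunks above: with no
// arguments to infer from, the constructor needs its type parameter spelled
// out (sets.New[string]() is the generic spelling of the old
// sets.NewString()), and ranging over sets.List gives a sorted, deterministic
// pod order. Names are illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	previous := sets.New[string]() // empty: no previous leaders to exclude
	leaders := sets.New("autoscaler-1", "autoscaler-0").Difference(previous)

	for _, leader := range sets.List(leaders) { // sorted: autoscaler-0 first
		fmt.Println("deleting pod", leader)
	}
}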
- leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, controllerDeploymentName, system.Namespace(), sets.NewString(), NumControllerReconcilers*test.ServingFlags.Buckets) + leaders, err := pkgHa.WaitForNewLeaders(context.Background(), t, clients.KubeClient, controllerDeploymentName, system.Namespace(), sets.New[string](), NumControllerReconcilers*test.ServingFlags.Buckets) if err != nil { t.Fatal("Failed to get leader:", err) } @@ -58,7 +58,7 @@ func TestControllerHA(t *testing.T) { prober := test.RunRouteProber(t.Logf, clients, resources.Service.Status.URL.URL(), test.AddRootCAtoTransport(context.Background(), t.Logf, clients, test.ServingFlags.HTTPS)) defer test.AssertProberDefault(t, prober) - for _, leader := range leaders.List() { + for _, leader := range sets.List(leaders) { if err := clients.KubeClient.CoreV1().Pods(system.Namespace()).Delete(context.Background(), leader, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { t.Fatalf("Failed to delete pod %s: %v", leader, err) diff --git a/vendor/knative.dev/networking/pkg/apis/networking/metadata_validation.go b/vendor/knative.dev/networking/pkg/apis/networking/metadata_validation.go index fbd6c155fd60..ff081dcd0bf9 100644 --- a/vendor/knative.dev/networking/pkg/apis/networking/metadata_validation.go +++ b/vendor/knative.dev/networking/pkg/apis/networking/metadata_validation.go @@ -25,7 +25,7 @@ import ( ) var ( - allowedAnnotations = sets.NewString( + allowedAnnotations = sets.New[string]( IngressClassAnnotationKey, CertificateClassAnnotationKey, DisableAutoTLSAnnotationKey, diff --git a/vendor/knative.dev/networking/pkg/certificates/reconciler/certificates.go b/vendor/knative.dev/networking/pkg/certificates/reconciler/certificates.go index bf8c26d207f8..2078fbca295b 100644 --- a/vendor/knative.dev/networking/pkg/certificates/reconciler/certificates.go +++ b/vendor/knative.dev/networking/pkg/certificates/reconciler/certificates.go @@ -165,8 +165,8 @@ func parseAndValidateSecret(secret *corev1.Secret, caCert []byte, sans ...string return nil, nil, err } - sanSet := sets.NewString(sans...) - certSet := sets.NewString(cert.DNSNames...) + sanSet := sets.New(sans...) + certSet := sets.New(cert.DNSNames...) if !sanSet.Equal(certSet) { return nil, nil, fmt.Errorf("unexpected SANs") } diff --git a/vendor/knative.dev/networking/pkg/certificates/reconciler/reconciler.go b/vendor/knative.dev/networking/pkg/certificates/reconciler/reconciler.go index d036ba0f0115..c1aa005f4d0a 100644 --- a/vendor/knative.dev/networking/pkg/certificates/reconciler/reconciler.go +++ b/vendor/knative.dev/networking/pkg/certificates/reconciler/reconciler.go @@ -289,14 +289,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. -func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.Secret, desiredFinalizers sets.String) (*v1.Secret, error) { +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.Secret, desiredFinalizers sets.Set[string]) (*v1.Secret, error) { // Don't modify the informers copy. existing := resource.DeepCopy() var finalizers []string // If there's nothing to update, just return. - existingFinalizers := sets.NewString(existing.Finalizers...) + existingFinalizers := sets.New(existing.Finalizers...) 
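// A minimal sketch of the SAN check in parseAndValidateSecret above: set
// equality ignores ordering and duplicates, so the certificate's DNS names
// may be listed in any order and still match the requested SANs. Values are
// illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	sans := []string{"svc.ns", "svc.ns.svc", "svc.ns.svc.cluster.local"}
	certDNSNames := []string{"svc.ns.svc.cluster.local", "svc.ns", "svc.ns.svc"}

	sanSet := sets.New(sans...)
	certSet := sets.New(certDNSNames...)
	fmt.Println("SANs match:", sanSet.Equal(certSet)) // true despite ordering
}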
if desiredFinalizers.Has(r.finalizerName) { if existingFinalizers.Has(r.finalizerName) { @@ -312,7 +312,7 @@ func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource } // Remove the finalizer. existingFinalizers.Delete(r.finalizerName) - finalizers = existingFinalizers.List() + finalizers = sets.List(existingFinalizers) } mergePatch := map[string]interface{}{ @@ -346,7 +346,7 @@ func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource * return resource, nil } - finalizers := sets.NewString(resource.Finalizers...) + finalizers := sets.New(resource.Finalizers...) // If this resource is not being deleted, mark the finalizer. if resource.GetDeletionTimestamp().IsZero() { @@ -365,7 +365,7 @@ func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.Secret return resource, nil } - finalizers := sets.NewString(resource.Finalizers...) + finalizers := sets.New(resource.Finalizers...) if reconcileEvent != nil { var event *pkgreconciler.ReconcilerEvent diff --git a/vendor/knative.dev/networking/pkg/client/injection/reconciler/networking/v1alpha1/serverlessservice/reconciler.go b/vendor/knative.dev/networking/pkg/client/injection/reconciler/networking/v1alpha1/serverlessservice/reconciler.go index 761cbf090cef..0ead54735fdf 100644 --- a/vendor/knative.dev/networking/pkg/client/injection/reconciler/networking/v1alpha1/serverlessservice/reconciler.go +++ b/vendor/knative.dev/networking/pkg/client/injection/reconciler/networking/v1alpha1/serverlessservice/reconciler.go @@ -346,14 +346,14 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLo // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. -func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.ServerlessService, desiredFinalizers sets.String) (*v1alpha1.ServerlessService, error) { +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.ServerlessService, desiredFinalizers sets.Set[string]) (*v1alpha1.ServerlessService, error) { // Don't modify the informers copy. existing := resource.DeepCopy() var finalizers []string // If there's nothing to update, just return. - existingFinalizers := sets.NewString(existing.Finalizers...) + existingFinalizers := sets.New[string](existing.Finalizers...) if desiredFinalizers.Has(r.finalizerName) { if existingFinalizers.Has(r.finalizerName) { @@ -369,7 +369,7 @@ func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource } // Remove the finalizer. existingFinalizers.Delete(r.finalizerName) - finalizers = existingFinalizers.List() + finalizers = sets.List(existingFinalizers) } mergePatch := map[string]interface{}{ @@ -403,7 +403,7 @@ func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource * return resource, nil } - finalizers := sets.NewString(resource.Finalizers...) + finalizers := sets.New[string](resource.Finalizers...) // If this resource is not being deleted, mark the finalizer. if resource.GetDeletionTimestamp().IsZero() { @@ -422,7 +422,7 @@ func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1. return resource, nil } - finalizers := sets.NewString(resource.Finalizers...) + finalizers := sets.New[string](resource.Finalizers...) 
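// A minimal sketch of the finalizer bookkeeping in the reconciler hunks
// above: the slice from the object is lifted into a set for Has/Delete, then
// flattened back with sets.List when building the merge patch. The finalizer
// name is illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	const finalizerName = "example.dev/finalizer"
	existing := []string{"other.dev/finalizer", finalizerName}

	finalizers := sets.New(existing...)
	if finalizers.Has(finalizerName) {
		finalizers.Delete(finalizerName) // clear our finalizer, keep the rest
	}
	fmt.Println(sets.List(finalizers)) // [other.dev/finalizer]
}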
if reconcileEvent != nil { var event *reconciler.ReconcilerEvent diff --git a/vendor/knative.dev/networking/pkg/ingress/ingress.go b/vendor/knative.dev/networking/pkg/ingress/ingress.go index 5e24eecfa91f..f1ffb88c447e 100644 --- a/vendor/knative.dev/networking/pkg/ingress/ingress.go +++ b/vendor/knative.dev/networking/pkg/ingress/ingress.go @@ -73,13 +73,13 @@ func InsertProbe(ing *v1alpha1.Ingress) (string, error) { // HostsPerVisibility takes an Ingress and a map from visibility levels to a set of string keys, // it then returns a map from that key space to the hosts under that visibility. -func HostsPerVisibility(ing *v1alpha1.Ingress, visibilityToKey map[v1alpha1.IngressVisibility]sets.String) map[string]sets.String { - output := make(map[string]sets.String, 2) // We currently have public and internal. +func HostsPerVisibility(ing *v1alpha1.Ingress, visibilityToKey map[v1alpha1.IngressVisibility]sets.Set[string]) map[string]sets.Set[string] { + output := make(map[string]sets.Set[string], 2) // We currently have public and internal. for _, rule := range ing.Spec.Rules { - for host := range ExpandedHosts(sets.NewString(rule.Hosts...)) { + for host := range ExpandedHosts(sets.New(rule.Hosts...)) { for key := range visibilityToKey[rule.Visibility] { if _, ok := output[key]; !ok { - output[key] = make(sets.String, len(rule.Hosts)) + output[key] = make(sets.Set[string], len(rule.Hosts)) } output[key].Insert(host) } @@ -89,15 +89,15 @@ func HostsPerVisibility(ing *v1alpha1.Ingress, visibilityToKey map[v1alpha1.Ingr } // ExpandedHosts sets up hosts for the short-names for cluster DNS names. -func ExpandedHosts(hosts sets.String) sets.String { +func ExpandedHosts(hosts sets.Set[string]) sets.Set[string] { allowedSuffixes := []string{ "", "." + network.GetClusterDomainName(), ".svc." + network.GetClusterDomainName(), } // Optimistically pre-alloc. - expanded := make(sets.String, len(hosts)*len(allowedSuffixes)) - for _, h := range hosts.List() { + expanded := make(sets.Set[string], len(hosts)*len(allowedSuffixes)) + for _, h := range sets.List(hosts) { for _, suffix := range allowedSuffixes { if th := strings.TrimSuffix(h, suffix); suffix == "" || len(th) < len(h) { if isValidTopLevelDomain(th) { diff --git a/vendor/knative.dev/networking/pkg/prober/prober.go b/vendor/knative.dev/networking/pkg/prober/prober.go index 4ff7fb06ec9e..ec9330606315 100644 --- a/vendor/knative.dev/networking/pkg/prober/prober.go +++ b/vendor/knative.dev/networking/pkg/prober/prober.go @@ -143,14 +143,14 @@ type Manager struct { // mu guards keys. mu sync.Mutex - keys sets.String + keys sets.Set[string] } // New creates a new Manager, that will invoke the given callback when // async probing is finished. 
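// A minimal sketch of what ExpandedHosts above computes, with the cluster
// domain hard-coded and the top-level-domain validity check omitted for
// brevity: a fully qualified cluster-local host is expanded into its short
// forms so rules and probes cover every spelling.
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

func expandedHosts(hosts sets.Set[string]) sets.Set[string] {
	suffixes := []string{"", ".cluster.local", ".svc.cluster.local"}
	expanded := make(sets.Set[string], len(hosts)*len(suffixes))
	for _, h := range sets.List(hosts) {
		for _, suffix := range suffixes {
			if th := strings.TrimSuffix(h, suffix); suffix == "" || len(th) < len(h) {
				expanded.Insert(th)
			}
		}
	}
	return expanded
}

func main() {
	out := expandedHosts(sets.New("foo.default.svc.cluster.local"))
	fmt.Println(sets.List(out))
	// [foo.default foo.default.svc foo.default.svc.cluster.local]
}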
func New(cb Done, transport http.RoundTripper) *Manager { return &Manager{ - keys: sets.NewString(), + keys: sets.New[string](), cb: cb, transport: transport, } diff --git a/vendor/modules.txt b/vendor/modules.txt index 050cd1aa2e15..fe5cc1bef183 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1333,7 +1333,7 @@ knative.dev/caching/pkg/client/listers/caching/v1alpha1 # knative.dev/hack v0.0.0-20240108153050-3ea694d6dad7 ## explicit; go 1.18 knative.dev/hack -# knative.dev/networking v0.0.0-20240108134621-7cca4b010b25 +# knative.dev/networking v0.0.0-20240108134621-7cca4b010b25 => github.com/ReToCode/networking v0.0.0-20240109073627-bc010699726a ## explicit; go 1.18 knative.dev/networking/config knative.dev/networking/pkg @@ -1504,3 +1504,4 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 # github.com/gorilla/websocket => github.com/gorilla/websocket v1.5.0 +# knative.dev/networking => github.com/ReToCode/networking v0.0.0-20240109073627-bc010699726a
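// A minimal sketch of the prober Manager's keys field above: a
// sets.Set[string], like any Go map, is not safe for concurrent use, so the
// Manager guards it with a mutex. The probe bookkeeping here is illustrative.
package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

type manager struct {
	mu   sync.Mutex
	keys sets.Set[string]
}

func (m *manager) start(key string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.keys.Has(key) {
		return false // a probe for this key is already in flight
	}
	m.keys.Insert(key)
	return true
}

func main() {
	m := &manager{keys: sets.New[string]()}
	fmt.Println(m.start("ingress/foo")) // true
	fmt.Println(m.start("ingress/foo")) // false: already probing
}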