From 44452995d62e34e6f79e1fe939548b91e3bbf463 Mon Sep 17 00:00:00 2001 From: Matt Matejczyk Date: Fri, 15 Nov 2019 11:16:53 +0100 Subject: [PATCH] Increase cache size for endpointslices. It should match what we have for endpoints as endpointslices is replacing them and # updates of endpointslices should be roughly the same as # updates of endpoints. Otherwise, kube-proxy may start losing watches for endpointslices (we believe this is what currently happens in scale tests, will create an issue for that soon). --- pkg/registry/cachesize/cachesize.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/registry/cachesize/cachesize.go b/pkg/registry/cachesize/cachesize.go index 92bcc6a6306db..37575af570cbd 100644 --- a/pkg/registry/cachesize/cachesize.go +++ b/pkg/registry/cachesize/cachesize.go @@ -36,6 +36,7 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupReso watchCacheSizes := make(map[schema.GroupResource]int) watchCacheSizes[schema.GroupResource{Resource: "replicationcontrollers"}] = maxInt(5*clusterSize, 100) watchCacheSizes[schema.GroupResource{Resource: "endpoints"}] = maxInt(10*clusterSize, 1000) + watchCacheSizes[schema.GroupResource{Resource: "endpointslices", Group: "discovery.k8s.io"}] = maxInt(10*clusterSize, 1000) watchCacheSizes[schema.GroupResource{Resource: "nodes"}] = maxInt(5*clusterSize, 1000) watchCacheSizes[schema.GroupResource{Resource: "pods"}] = maxInt(50*clusterSize, 1000) watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)