From 348bfe5ede1aa2081dc23e443cb2588284fdcc2d Mon Sep 17 00:00:00 2001
From: Alexander Yastrebov
Date: Fri, 20 Sep 2024 19:33:10 +0200
Subject: [PATCH] filters/auth: release tokeninfo cache mutex earlier (#3241)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* filters/auth: reset parallelism for BenchmarkTokeninfoCache

Signed-off-by: Alexander Yastrebov

* filters/auth: release tokeninfo cache mutex earlier

The cached value is read-only, so release the cache mutex early and
copy and adjust the cached value outside of the critical section.

```
goos: linux
goarch: amd64
pkg: github.com/zalando/skipper/filters/auth
cpu: Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz
                                                    │   HEAD~1    │                HEAD                 │
                                                    │   sec/op    │    sec/op     vs base               │
TokeninfoCache/tokens=1,cacheSize=1,p=0-8             639.4n ± 21%   271.9n ±  6%  -57.48% (p=0.000 n=10)
TokeninfoCache/tokens=2,cacheSize=2,p=0-8             625.7n ± 24%   420.4n ± 32%  -32.81% (p=0.000 n=10)
TokeninfoCache/tokens=100,cacheSize=100,p=0-8         869.8n ± 17%   463.4n ±  3%  -46.72% (p=0.000 n=10)
TokeninfoCache/tokens=100,cacheSize=100,p=10000-8     670.8n ± 12%   594.6n ±  2%  -11.36% (p=0.000 n=10)
TokeninfoCache/tokens=4,cacheSize=2,p=0-8             2.553m ±  1%   2.554m ±  1%        ~ (p=0.684 n=10)
TokeninfoCache/tokens=100,cacheSize=10,p=0-8          2.581m ±  1%   2.566m ±  0%        ~ (p=0.089 n=10)
geomean                                               10.74µ         7.688µ       -28.44%

                                                    │   HEAD~1   │                HEAD                │
                                                    │    B/op    │    B/op     vs base                │
TokeninfoCache/tokens=1,cacheSize=1,p=0-8             344.0 ± 0%    344.0 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=2,cacheSize=2,p=0-8             344.0 ± 0%    344.0 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=100,cacheSize=100,p=0-8         344.0 ± 0%    344.0 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=100,cacheSize=100,p=10000-8     366.0 ± 0%    366.0 ± 1%       ~ (p=0.452 n=10)
TokeninfoCache/tokens=4,cacheSize=2,p=0-8             27.00 ± 4%    27.00 ± 0%       ~ (p=0.474 n=10)
TokeninfoCache/tokens=100,cacheSize=10,p=0-8          28.00 ± 7%    27.00 ± 7%       ~ (p=0.259 n=10)
geomean                                               149.7         148.8       -0.60%
¹ all samples are equal

                                                    │   HEAD~1   │                HEAD                │
                                                    │ allocs/op  │ allocs/op   vs base                │
TokeninfoCache/tokens=1,cacheSize=1,p=0-8             3.000 ± 0%    3.000 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=2,cacheSize=2,p=0-8             3.000 ± 0%    3.000 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=100,cacheSize=100,p=0-8         3.000 ± 0%    3.000 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=100,cacheSize=100,p=10000-8     3.000 ± 0%    3.000 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=4,cacheSize=2,p=0-8             0.000 ± 0%    0.000 ± 0%       ~ (p=1.000 n=10) ¹
TokeninfoCache/tokens=100,cacheSize=10,p=0-8          0.000 ± 0%    0.000 ± 0%       ~ (p=1.000 n=10) ¹
geomean                                                         ²               +0.00%               ²
¹ all samples are equal
² summaries must be >0 to compute geomean
```

Signed-off-by: Alexander Yastrebov

---------

Signed-off-by: Alexander Yastrebov
---
 filters/auth/tokeninfocache.go      | 10 ++++++++--
 filters/auth/tokeninfocache_test.go |  2 ++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/filters/auth/tokeninfocache.go b/filters/auth/tokeninfocache.go
index 65cd65e112..c925ae5834 100644
--- a/filters/auth/tokeninfocache.go
+++ b/filters/auth/tokeninfocache.go
@@ -61,16 +61,19 @@ func (c *tokeninfoCache) cached(token string) map[string]any {
 	now := c.now()
 
 	c.mu.Lock()
-	defer c.mu.Unlock()
 
 	if e, ok := c.cache[token]; ok {
 		if now.Before(e.expiresAt) {
 			c.history.MoveToFront(e.href)
+			cachedInfo := e.info
+			c.mu.Unlock()
+
 			// It might be ok to return cached value
 			// without adjusting "expires_in" to avoid copy
+			// if caller never modifies the result and
 			// when "expires_in" did not change (same second)
 			// or for small TTL values
-			info := shallowCopyOf(e.info)
+			info := shallowCopyOf(cachedInfo)
 
 			elapsed := now.Sub(e.cachedAt).Truncate(time.Second).Seconds()
 			info[expiresInField] = info[expiresInField].(float64) - elapsed
@@ -81,6 +84,9 @@ func (c *tokeninfoCache) cached(token string) map[string]any {
 			c.history.Remove(e.href)
 		}
 	}
+
+	c.mu.Unlock()
+
 	return nil
 }
 
diff --git a/filters/auth/tokeninfocache_test.go b/filters/auth/tokeninfocache_test.go
index 62a6d9f41b..a5cfbf4a0e 100644
--- a/filters/auth/tokeninfocache_test.go
+++ b/filters/auth/tokeninfocache_test.go
@@ -249,6 +249,8 @@ func BenchmarkTokeninfoCache(b *testing.B) {
 
 			if bi.parallelism != 0 {
 				b.SetParallelism(bi.parallelism)
+			} else {
+				b.SetParallelism(1)
 			}
 
 			b.ReportAllocs()
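
Editor's sketch: the locking pattern described in the commit message ("release cache mutex early, copy and adjust the cached value outside of the critical section") can be summarized in a minimal standalone example. The names below (tokenCache, entry, expiresInField) are illustrative stand-ins rather than skipper's actual types, and the LRU bookkeeping and expiry eviction are omitted; this is a sketch of the technique, not the filter's implementation.

```
// Minimal sketch: hold the mutex only for the lookup, then copy and
// adjust the read-only cached map after Unlock.
package main

import (
	"fmt"
	"sync"
	"time"
)

const expiresInField = "expires_in" // illustrative field name

type entry struct {
	info      map[string]any
	cachedAt  time.Time
	expiresAt time.Time
}

type tokenCache struct {
	mu    sync.Mutex
	cache map[string]*entry
	now   func() time.Time
}

func (c *tokenCache) cached(token string) map[string]any {
	now := c.now()

	c.mu.Lock()
	if e, ok := c.cache[token]; ok && now.Before(e.expiresAt) {
		// The cached map is never mutated, so it is safe to read after
		// releasing the mutex; grab the fields needed later while the
		// lock is still held.
		cachedInfo, cachedAt := e.info, e.cachedAt
		c.mu.Unlock()

		// Shallow copy and "expires_in" adjustment happen outside of
		// the critical section.
		info := make(map[string]any, len(cachedInfo))
		for k, v := range cachedInfo {
			info[k] = v
		}
		elapsed := now.Sub(cachedAt).Truncate(time.Second).Seconds()
		info[expiresInField] = info[expiresInField].(float64) - elapsed
		return info
	}
	c.mu.Unlock()
	return nil
}

func main() {
	c := &tokenCache{
		cache: map[string]*entry{
			"token": {
				info:      map[string]any{"uid": "jdoe", expiresInField: float64(600)},
				cachedAt:  time.Now().Add(-2 * time.Second),
				expiresAt: time.Now().Add(598 * time.Second),
			},
		},
		now: time.Now,
	}
	fmt.Println(c.cached("token")) // expires_in reduced by the elapsed seconds
}
```

One deliberate difference from the patch: the sketch copies cachedAt while still holding the lock, whereas the patch reads e.cachedAt after unlocking, which is fine there because that field is not modified after the entry is created.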
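
For the test change, the relevant knob is testing.B.SetParallelism: SetParallelism(p) makes b.RunParallel start p*GOMAXPROCS goroutines, so the added else branch makes the parallelism explicit for the "p=0" benchmark cases. The benchmark below is a hypothetical, self-contained illustration of that interaction (not part of the skipper test suite):

```
// bench_test.go — hypothetical benchmark showing how SetParallelism
// changes the number of goroutines used by RunParallel.
package bench

import (
	"fmt"
	"sync"
	"testing"
)

func BenchmarkMutexMapRead(b *testing.B) {
	var mu sync.Mutex
	m := map[string]int{"k": 1}

	for _, p := range []int{1, 100} {
		b.Run(fmt.Sprintf("p=%d", p), func(b *testing.B) {
			b.SetParallelism(p) // RunParallel uses p*GOMAXPROCS goroutines
			b.ReportAllocs()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					mu.Lock()
					_ = m["k"]
					mu.Unlock()
				}
			})
		})
	}
}
```

Run with "go test -bench=MutexMapRead -run=^$ -count=10"; the HEAD~1 vs HEAD table in the commit message is benchstat output over such repeated runs.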