@@ -7,13 +7,16 @@ package queryrange
 
 import (
 	"context"
+	"net/http"
+	"sync"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	"github.com/weaveworks/common/user"
+	"go.uber.org/atomic"
 
 	"github.com/grafana/mimir/pkg/util"
 )
@@ -192,10 +195,11 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) {
 }
 
 type mockLimits struct {
-	maxQueryLookback  time.Duration
-	maxQueryLength    time.Duration
-	maxCacheFreshness time.Duration
-	totalShards       int
+	maxQueryLookback    time.Duration
+	maxQueryLength      time.Duration
+	maxCacheFreshness   time.Duration
+	maxQueryParallelism int
+	totalShards         int
 }
 
 func (m mockLimits) MaxQueryLookback(string) time.Duration {
@@ -206,8 +210,11 @@ func (m mockLimits) MaxQueryLength(string) time.Duration {
 	return m.maxQueryLength
 }
 
-func (mockLimits) MaxQueryParallelism(string) int {
-	return 14 // Flag default.
+func (m mockLimits) MaxQueryParallelism(string) int {
+	if m.maxQueryParallelism == 0 {
+		return 14 // Flag default.
+	}
+	return m.maxQueryParallelism
 }
 
 func (m mockLimits) MaxCacheFreshness(string) time.Duration {
@@ -226,3 +233,149 @@ func (m *mockHandler) Do(ctx context.Context, req Request) (Response, error) {
 	args := m.Called(ctx, req)
 	return args.Get(0).(Response), args.Error(1)
 }
+
+func TestLimitedRoundTripper_MaxQueryParallelism(t *testing.T) {
+	var (
+		maxQueryParallelism = 2
+		count               atomic.Int32
+		max                 atomic.Int32
+		downstream          = RoundTripFunc(func(_ *http.Request) (*http.Response, error) {
+			cur := count.Inc()
+			if cur > max.Load() {
+				max.Store(cur)
+			}
+			defer count.Dec()
+			// simulate some work
+			time.Sleep(20 * time.Millisecond)
+			return &http.Response{
+				Body: http.NoBody,
+			}, nil
+		})
+		ctx = user.InjectOrgID(context.Background(), "foo")
+	)
+
+	r, err := PrometheusCodec.EncodeRequest(ctx, &PrometheusRequest{
+		Path:  "/query_range",
+		Start: time.Now().Add(time.Hour).Unix(),
+		End:   util.TimeToMillis(time.Now()),
+		Step:  int64(1 * time.Second * time.Millisecond),
+		Query: `foo`,
+	})
+	require.Nil(t, err)
+
+	_, err = NewLimitedRoundTripper(downstream, PrometheusCodec, mockLimits{maxQueryParallelism: maxQueryParallelism},
+		MiddlewareFunc(func(next Handler) Handler {
+			return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+				var wg sync.WaitGroup
+				for i := 0; i < maxQueryParallelism+20; i++ {
+					wg.Add(1)
+					go func() {
+						defer wg.Done()
+						_, _ = next.Do(c, &PrometheusRequest{})
+					}()
+				}
+				wg.Wait()
+				return NewEmptyPrometheusResponse(), nil
+			})
+		}),
+	).RoundTrip(r)
+	require.NoError(t, err)
+	maxFound := int(max.Load())
+	require.LessOrEqual(t, maxFound, maxQueryParallelism, "max query parallelism: ", maxFound, " went over the configured one:", maxQueryParallelism)
+}
+
+func TestLimitedRoundTripper_MaxQueryParallelismLateScheduling(t *testing.T) {
+	var (
+		maxQueryParallelism = 2
+		downstream          = RoundTripFunc(func(_ *http.Request) (*http.Response, error) {
+			// simulate some work
+			time.Sleep(20 * time.Millisecond)
+			return &http.Response{
+				Body: http.NoBody,
+			}, nil
+		})
+		ctx = user.InjectOrgID(context.Background(), "foo")
+	)
+
+	r, err := PrometheusCodec.EncodeRequest(ctx, &PrometheusRequest{
+		Path:  "/query_range",
+		Start: time.Now().Add(time.Hour).Unix(),
+		End:   util.TimeToMillis(time.Now()),
+		Step:  int64(1 * time.Second * time.Millisecond),
+		Query: `foo`,
+	})
+	require.Nil(t, err)
+
+	_, err = NewLimitedRoundTripper(downstream, PrometheusCodec, mockLimits{maxQueryParallelism: maxQueryParallelism},
+		MiddlewareFunc(func(next Handler) Handler {
+			return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+				// Fire up the work but don't wait for it to finish.
+				for i := 0; i < 10; i++ {
+					go func() {
+						_, _ = next.Do(c, &PrometheusRequest{})
+					}()
+				}
+				return NewEmptyPrometheusResponse(), nil
+			})
+		}),
+	).RoundTrip(r)
+	require.NoError(t, err)
+}
+
+func TestLimitedRoundTripper_OriginalRequestContextCancellation(t *testing.T) {
+	var (
+		maxQueryParallelism = 2
+		downstream          = RoundTripFunc(func(req *http.Request) (*http.Response, error) {
+			// Sleep for a long time or until the request context is canceled.
+			select {
+			case <-time.After(time.Minute):
+				return &http.Response{Body: http.NoBody}, nil
+			case <-req.Context().Done():
+				return nil, req.Context().Err()
+			}
+		})
+		reqCtx, reqCancel = context.WithCancel(user.InjectOrgID(context.Background(), "foo"))
+	)
+
+	r, err := PrometheusCodec.EncodeRequest(reqCtx, &PrometheusRequest{
+		Path:  "/query_range",
+		Start: time.Now().Add(time.Hour).Unix(),
+		End:   util.TimeToMillis(time.Now()),
+		Step:  int64(1 * time.Second * time.Millisecond),
+		Query: `foo`,
+	})
+	require.Nil(t, err)
+
+	_, err = NewLimitedRoundTripper(downstream, PrometheusCodec, mockLimits{maxQueryParallelism: maxQueryParallelism},
+		MiddlewareFunc(func(next Handler) Handler {
+			return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+				var wg sync.WaitGroup
+
+				// Fire up some work. Each sub-request will either be blocked in the sleep or in the queue
+				// waiting to be scheduled.
+				for i := 0; i < maxQueryParallelism+20; i++ {
+					wg.Add(1)
+					go func() {
+						defer wg.Done()
+						_, _ = next.Do(c, &PrometheusRequest{})
+					}()
+				}
+
+				// Give it a bit of time to get the first sub-requests running.
+				time.Sleep(100 * time.Millisecond)
+
+				// Cancel the original request context.
+				reqCancel()
+
+				// Wait until all sub-requests are done. We expect all of them to be canceled quickly,
+				// so this should take a very short time.
+				waitStart := time.Now()
+				wg.Wait()
+				assert.Less(t, time.Since(waitStart).Milliseconds(), int64(100))
+
+				return NewEmptyPrometheusResponse(), nil
+			})
+		}),
+	).RoundTrip(r)
+	require.NoError(t, err)
+}
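
For readers following along, the behaviour these tests pin down can be summarised with a short sketch. This is not the package's actual NewLimitedRoundTripper implementation; the type and method names below (limitedSubRequests, do, maxParallelism) are assumptions made purely for illustration. The idea is that every sub-request must acquire a slot from a semaphore sized to the tenant's MaxQueryParallelism limit before reaching the downstream, and a sub-request still waiting for a slot gives up as soon as the original request context is canceled:

// Sketch only: a per-request parallelism cap built on a channel semaphore.
// The names below are illustrative assumptions, not the queryrange package API.
package sketch

import (
	"context"
	"net/http"
)

type limitedSubRequests struct {
	downstream     http.RoundTripper
	maxParallelism int // e.g. the tenant's MaxQueryParallelism limit
}

// do runs all sub-requests derived from one original request, keeping at most
// maxParallelism of them in flight at any time and aborting queued sub-requests
// once the original request's context is canceled.
func (l limitedSubRequests) do(ctx context.Context, subs []*http.Request) error {
	sem := make(chan struct{}, l.maxParallelism)
	errCh := make(chan error, len(subs))

	for _, sub := range subs {
		sub := sub
		go func() {
			select {
			case sem <- struct{}{}: // acquire a parallelism slot
				defer func() { <-sem }()
			case <-ctx.Done(): // original request canceled while still queued
				errCh <- ctx.Err()
				return
			}
			resp, err := l.downstream.RoundTrip(sub.WithContext(ctx))
			if resp != nil && resp.Body != nil {
				_ = resp.Body.Close()
			}
			errCh <- err
		}()
	}

	// Collect one result per sub-request; the buffered channel keeps the
	// remaining goroutines from blocking if we return early on an error.
	for range subs {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}

Read against this shape, the first test asserts that the observed concurrency never exceeds the configured slot count, the second that the round tripper still returns cleanly when sub-requests are fired off without being awaited, and the third that sub-requests observe cancellation of the original request context promptly.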