@@ -1032,7 +1032,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac
1032
1032
limits : limits ,
1033
1033
})
1034
1034
1035
- ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (0 , 0 , maxChunksLimit ))
1035
+ ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (0 , 0 , maxChunksLimit , 0 ))
1036
1036
1037
1037
// Push a number of series below the max chunks limit. Each series has 1 sample,
1038
1038
// so expect 1 chunk per series when querying back.
@@ -1077,7 +1077,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReac
1077
1077
ctx := user .InjectOrgID (context .Background (), "user" )
1078
1078
limits := & validation.Limits {}
1079
1079
flagext .DefaultValues (limits )
1080
- ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (maxSeriesLimit , 0 , 0 ))
1080
+ ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (maxSeriesLimit , 0 , 0 , 0 ))
1081
1081
1082
1082
// Prepare distributors.
1083
1083
ds , _ , _ , _ := prepare (t , prepConfig {
@@ -1161,7 +1161,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIs
1161
1161
var maxBytesLimit = (seriesToAdd ) * responseChunkSize
1162
1162
1163
1163
// Update the limiter with the calculated limits.
1164
- ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (0 , maxBytesLimit , 0 ))
1164
+ ctx = limiter .AddQueryLimiterToContext (ctx , limiter .NewQueryLimiter (0 , maxBytesLimit , 0 , 0 ))
1165
1165
1166
1166
// Push a number of series below the max chunk bytes limit. Subtract one for the series added above.
1167
1167
writeReq = makeWriteRequest (0 , seriesToAdd - 1 , 0 )
@@ -1192,6 +1192,75 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIs
1192
1192
assert .Equal (t , err , validation .LimitError (fmt .Sprintf (limiter .ErrMaxChunkBytesHit , maxBytesLimit )))
1193
1193
}
1194
1194
1195
// TestDistributor_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached
// verifies that QueryStream fails with ErrMaxDataBytesHit once the aggregated
// (pre-deduplication) label bytes fetched from ingesters exceed the per-query
// data-bytes limit, and succeeds while the total is at or below the limit.
func TestDistributor_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached(t *testing.T) {
	const seriesToAdd = 10

	ctx := user.InjectOrgID(context.Background(), "user")
	limits := &validation.Limits{}
	flagext.DefaultValues(limits)

	// Prepare distributors.
	// Use replication factor of 2 to always read all the chunks from both ingesters,
	// this guarantees us to always read the same chunks and have a stable test.
	ds, _, _, _ := prepare(t, prepConfig{
		numIngesters:      2,
		happyIngesters:    2,
		numDistributors:   1,
		shardByAllLabels:  true,
		limits:            limits,
		replicationFactor: 2,
	})

	allSeriesMatchers := []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
	}
	// Push a single series so we can measure its label size and derive the
	// data-bytes limit used by the rest of the test.
	writeReq := &cortexpb.WriteRequest{}
	writeReq.Timeseries = append(writeReq.Timeseries,
		makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0),
	)
	writeRes, err := ds[0].Push(ctx, writeReq)
	assert.Equal(t, &cortexpb.WriteResponse{}, writeRes)
	assert.Nil(t, err)
	labelSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
	require.NoError(t, err)

	// Use the resulting label size to calculate the limit as
	// (series to add, including our test series) * the per-series label size.
	var responseLabelSize = labelSizeResponse.LabelsSize()
	var maxBytesLimit = (seriesToAdd) * responseLabelSize * 2 // Multiplying by RF because the limit is applied before de-duping.

	// Update the limiter with the calculated limits.
	ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit))

	// Push a number of series below the max data bytes limit. Subtract one for the series added above.
	writeReq = makeWriteRequest(0, seriesToAdd-1, 0)
	writeRes, err = ds[0].Push(ctx, writeReq)
	assert.Equal(t, &cortexpb.WriteResponse{}, writeRes)
	assert.Nil(t, err)

	// Since the number of data bytes is equal to the limit (but doesn't
	// exceed it), we expect a query running on all series to succeed.
	queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
	require.NoError(t, err)
	assert.Len(t, queryRes.Chunkseries, seriesToAdd)

	// Push another series to exceed the data bytes limit once we'll query back all series.
	writeReq = &cortexpb.WriteRequest{}
	writeReq.Timeseries = append(writeReq.Timeseries,
		makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0),
	)

	writeRes, err = ds[0].Push(ctx, writeReq)
	assert.Equal(t, &cortexpb.WriteResponse{}, writeRes)
	assert.Nil(t, err)

	// Since the aggregated data size is exceeding the limit, we expect
	// a query running on all series to fail.
	_, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
	require.Error(t, err)
	assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, maxBytesLimit)))
}
1263
+
1195
1264
func TestDistributor_Push_LabelRemoval (t * testing.T ) {
1196
1265
ctx := user .InjectOrgID (context .Background (), "user" )
1197
1266
@@ -1930,7 +1999,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1930
1999
},
1931
2000
expectedResult : []metric.Metric {},
1932
2001
expectedIngesters : numIngesters ,
1933
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2002
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1934
2003
expectedErr : nil ,
1935
2004
},
1936
2005
"should filter metrics by single matcher" : {
@@ -1942,7 +2011,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1942
2011
{Metric : util .LabelsToMetric (fixtures [1 ].lbls )},
1943
2012
},
1944
2013
expectedIngesters : numIngesters ,
1945
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2014
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1946
2015
expectedErr : nil ,
1947
2016
},
1948
2017
"should filter metrics by multiple matchers" : {
@@ -1954,7 +2023,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1954
2023
{Metric : util .LabelsToMetric (fixtures [0 ].lbls )},
1955
2024
},
1956
2025
expectedIngesters : numIngesters ,
1957
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2026
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1958
2027
expectedErr : nil ,
1959
2028
},
1960
2029
"should return all matching metrics even if their FastFingerprint collide" : {
@@ -1966,7 +2035,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1966
2035
{Metric : util .LabelsToMetric (fixtures [4 ].lbls )},
1967
2036
},
1968
2037
expectedIngesters : numIngesters ,
1969
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2038
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1970
2039
expectedErr : nil ,
1971
2040
},
1972
2041
"should query only ingesters belonging to tenant's subring if shuffle sharding is enabled" : {
@@ -1980,7 +2049,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1980
2049
{Metric : util .LabelsToMetric (fixtures [1 ].lbls )},
1981
2050
},
1982
2051
expectedIngesters : 3 ,
1983
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2052
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1984
2053
expectedErr : nil ,
1985
2054
},
1986
2055
"should query all ingesters if shuffle sharding is enabled but shard size is 0" : {
@@ -1994,7 +2063,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
1994
2063
{Metric : util .LabelsToMetric (fixtures [1 ].lbls )},
1995
2064
},
1996
2065
expectedIngesters : numIngesters ,
1997
- queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 ),
2066
+ queryLimiter : limiter .NewQueryLimiter (0 , 0 , 0 , 0 ),
1998
2067
expectedErr : nil ,
1999
2068
},
2000
2069
"should return err if series limit is exhausted" : {
@@ -2005,7 +2074,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
2005
2074
},
2006
2075
expectedResult : nil ,
2007
2076
expectedIngesters : numIngesters ,
2008
- queryLimiter : limiter .NewQueryLimiter (1 , 0 , 0 ),
2077
+ queryLimiter : limiter .NewQueryLimiter (1 , 0 , 0 , 0 ),
2009
2078
expectedErr : validation .LimitError (fmt .Sprintf (limiter .ErrMaxSeriesHit , 1 )),
2010
2079
},
2011
2080
"should not exhaust series limit when only one series is fetched" : {
@@ -2016,7 +2085,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) {
2016
2085
{Metric : util .LabelsToMetric (fixtures [2 ].lbls )},
2017
2086
},
2018
2087
expectedIngesters : numIngesters ,
2019
- queryLimiter : limiter .NewQueryLimiter (1 , 0 , 0 ),
2088
+ queryLimiter : limiter .NewQueryLimiter (1 , 0 , 0 , 0 ),
2020
2089
expectedErr : nil ,
2021
2090
},
2022
2091
}
@@ -2116,7 +2185,7 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) {
2116
2185
matchers : []* labels.Matcher {
2117
2186
mustNewMatcher (labels .MatchRegexp , model .MetricNameLabel , "foo.+" ),
2118
2187
},
2119
- queryLimiter : limiter .NewQueryLimiter (100 , 0 , 0 ),
2188
+ queryLimiter : limiter .NewQueryLimiter (100 , 0 , 0 , 0 ),
2120
2189
expectedErr : nil ,
2121
2190
},
2122
2191
}
0 commit comments