diff --git a/Gopkg.lock b/Gopkg.lock
index 250b4fa3606a..bc716a09b7cf 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -186,8 +186,8 @@
   version = "v2.0.0"

 [[projects]]
-  branch = "master"
-  digest = "1:5750dfa5a8160b51a01c02d6d7841b50fe7d2d97344c200fc4cdc3c46b3953f8"
+  branch = "lazy-load-chunks"
+  digest = "1:bf1fa66c54722bc8664f1465e427cd6fe7df52f2b6fd5ab996baf37601687b70"
   name = "github.com/cortexproject/cortex"
   packages = [
     "pkg/chunk",
@@ -211,10 +211,10 @@
     "pkg/util/middleware",
     "pkg/util/spanlogger",
     "pkg/util/validation",
-    "pkg/util/wire",
   ]
   pruneopts = "UT"
-  revision = "ff51bd3c7267184042ea4cf347e6d1fa24934c91"
+  revision = "95a3f308e95617732b76e337874e83ccf173cf14"
+  source = "https://github.com/grafana/cortex"

 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@@ -1366,7 +1366,6 @@
     "github.com/cortexproject/cortex/pkg/util",
     "github.com/cortexproject/cortex/pkg/util/flagext",
     "github.com/cortexproject/cortex/pkg/util/validation",
-    "github.com/cortexproject/cortex/pkg/util/wire",
     "github.com/fatih/color",
     "github.com/go-kit/kit/log",
     "github.com/go-kit/kit/log/level",
@@ -1390,10 +1389,12 @@
     "github.com/prometheus/prometheus/discovery/targetgroup",
     "github.com/prometheus/prometheus/pkg/labels",
     "github.com/prometheus/prometheus/pkg/relabel",
+    "github.com/prometheus/prometheus/pkg/textparse",
     "github.com/prometheus/prometheus/relabel",
     "github.com/stretchr/testify/assert",
     "github.com/stretchr/testify/require",
     "github.com/weaveworks/common/httpgrpc",
+    "github.com/weaveworks/common/httpgrpc/server",
     "github.com/weaveworks/common/middleware",
     "github.com/weaveworks/common/server",
     "github.com/weaveworks/common/tracing",
diff --git a/Gopkg.toml b/Gopkg.toml
index e35786b0c722..a190ef298052 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -26,7 +26,8 @@

 [[constraint]]
   name = "github.com/cortexproject/cortex"
-  branch = "master"
+  source = "https://github.com/grafana/cortex"
+  branch = "lazy-load-chunks"

 [[constraint]]
   name = "github.com/weaveworks/common"
diff --git a/cmd/loki/main.go b/cmd/loki/main.go
index 073e8a0bd88e..da010c9b34f4 100644
--- a/cmd/loki/main.go
+++ b/cmd/loki/main.go
@@ -14,6 +14,7 @@ import (

 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/cortexproject/cortex/pkg/util/validation"
 )

 func init() {
@@ -29,6 +30,15 @@ func main() {
 	flagext.RegisterFlags(&cfg)
 	flag.Parse()

+	// LimitsConfig has a custom UnmarshalYAML that sets the defaults to a global.
+	// This global is set to the config passed into the last call to `NewOverrides`. If we don't
+	// call it at least once, the defaults are set to an empty struct.
+	// We call it with the flag values so that unmarshalling the config file only overrides the values explicitly set there.
+	if _, err := validation.NewOverrides(cfg.LimitsConfig); err != nil {
+		level.Error(util.Logger).Log("msg", "error loading limits", "err", err)
+		os.Exit(1)
+	}
+
 	util.InitLogger(&cfg.Server)

 	if configFile != "" {
diff --git a/docs/api.md b/docs/api.md
index 9980878ffec5..db618291b931 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -46,7 +46,7 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o
         "labels": "{instance=\"...\", job=\"...\", namespace=\"...\"}",
         "entries": [
           {
-            "timestamp": "2018-06-27T05:20:28.699492635Z",
+            "ts": "2018-06-27T05:20:28.699492635Z",
             "line": "..."
           },
           ...
@@ -88,6 +88,7 @@ The Loki server has the following API endpoints (_Note:_ Authentication is out o
     }
     ```

-## Example of using the API in a third-party client library
+## Examples of using the API in third-party client libraries

-Take a look at this [client](https://github.com/afiskon/promtail-client), but be aware that the API is not stable yet.
+1) Take a look at this Go [client](https://github.com/afiskon/promtail-client), but be aware that the API is not stable yet.
+2) A [Python 3 example](https://github.com/sleleko/devops-kb/blob/master/python/push-to-loki.py) that pushes entries to Loki.
diff --git a/docs/operations.md b/docs/operations.md
index 357c99a49117..883766832a12 100644
--- a/docs/operations.md
+++ b/docs/operations.md
@@ -141,7 +141,7 @@ The S3 configuration is setup with url format: `s3://access_key:secret_access_ke
 #### DynamoDB

 Loki uses DynamoDB for the index storage. It is used for querying logs, make
-sure you adjuest your throughput to your usage.
+sure you adjust your throughput to your usage.

 DynamoDB access is very similar to S3, however you do not need to specify a
 table name in the storage section, as Loki will calculate that for you.
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 7e14d798f93e..90a913b37642 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -45,8 +45,6 @@ to
 Both binaries support a log level parameter on the command-line, e.g.: `loki -log.level=debug ...`

-## No labels:
-
 ## Failed to create target, "ioutil.ReadDir: readdirent: not a directory"

 The promtail configuration contains a `__path__` entry to a directory that promtail cannot find.
@@ -82,3 +80,13 @@ Once connected, verify the config in `/etc/promtail/promtail.yml` is what you ex
 Also check `/var/log/positions.yaml` and make sure promtail is tailing the logs you would expect

 You can check the promtail log by looking in `/var/log/containers` at the promtail container log
+
+## Enable tracing for Loki
+
+We support [Jaeger](https://www.jaegertracing.io/) for tracing Loki: set the `JAEGER_AGENT_HOST` environment variable wherever Loki runs to enable it.
+
+If you deploy with Helm, refer to the following command:
+
+```bash
+$ helm upgrade --install loki loki/loki --set "loki.jaegerAgentHost=YOUR_JAEGER_AGENT_HOST"
+```
diff --git a/pkg/chunkenc/lazy_chunk.go b/pkg/chunkenc/lazy_chunk.go
new file mode 100644
index 000000000000..f23b404925a9
--- /dev/null
+++ b/pkg/chunkenc/lazy_chunk.go
@@ -0,0 +1,87 @@
+package chunkenc
+
+import (
+	"context"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/grafana/loki/pkg/iter"
+	"github.com/grafana/loki/pkg/logproto"
+)
+
+// LazyChunk loads the chunk when it is accessed.
+type LazyChunk struct {
+	Chunk   chunk.Chunk
+	Fetcher *chunk.Fetcher
+}
+
+func (c *LazyChunk) getChunk(ctx context.Context) (Chunk, error) {
+	chunks, err := c.Fetcher.FetchChunks(ctx, []chunk.Chunk{c.Chunk}, []string{c.Chunk.ExternalKey()})
+	if err != nil {
+		return nil, err
+	}
+
+	c.Chunk = chunks[0]
+	return chunks[0].Data.(*Facade).LokiChunk(), nil
+}
+
+// Iterator returns an entry iterator.
+func (c LazyChunk) Iterator(ctx context.Context, from, through time.Time, direction logproto.Direction) (iter.EntryIterator, error) {
+	// If the chunk is already loaded, then use that.
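+	// Otherwise, hand back a lazyIterator that defers fetching the chunk
+	// from storage until the first call to Next, so chunks that are never
+	// read are never downloaded.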
+	if c.Chunk.Data != nil {
+		lokiChunk := c.Chunk.Data.(*Facade).LokiChunk()
+		return lokiChunk.Iterator(from, through, direction)
+	}
+
+	return &lazyIterator{
+		chunk: c,
+
+		from:      from,
+		through:   through,
+		direction: direction,
+		context:   ctx,
+	}, nil
+}
+
+type lazyIterator struct {
+	iter.EntryIterator
+
+	chunk LazyChunk
+	err   error
+
+	from, through time.Time
+	direction     logproto.Direction
+	context       context.Context
+}
+
+func (it *lazyIterator) Next() bool {
+	if it.err != nil {
+		return false
+	}
+
+	if it.EntryIterator != nil {
+		return it.EntryIterator.Next()
+	}
+
+	chk, err := it.chunk.getChunk(it.context)
+	if err != nil {
+		it.err = err
+		return false
+	}
+
+	it.EntryIterator, it.err = chk.Iterator(it.from, it.through, it.direction)
+
+	return it.Next()
+}
+
+func (it *lazyIterator) Labels() string {
+	return it.chunk.Chunk.Metric.String()
+}
+
+func (it *lazyIterator) Error() error {
+	if it.err != nil {
+		return it.err
+	}
+
+	return it.EntryIterator.Error()
+}
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index f0c89017149c..b00d40647526 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -8,6 +8,8 @@ import (
 	"golang.org/x/net/context"

 	"github.com/go-kit/kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
 	"github.com/weaveworks/common/user"

@@ -17,6 +19,34 @@ import (
 	"github.com/grafana/loki/pkg/chunkenc"
 )

+var (
+	chunkEntries = promauto.NewHistogram(prometheus.HistogramOpts{
+		Name:    "loki_ingester_chunk_entries",
+		Help:    "Distribution of stored chunk entries (when stored).",
+		Buckets: prometheus.ExponentialBuckets(20, 2, 11), // biggest bucket is 20*2^(11-1) = 20480
+	})
+	chunkSize = promauto.NewHistogram(prometheus.HistogramOpts{
+		Name:    "loki_ingester_chunk_size_bytes",
+		Help:    "Distribution of stored chunk sizes (when stored).",
+		Buckets: prometheus.ExponentialBuckets(500, 2, 5), // biggest bucket is 500*2^(5-1) = 8000
+	})
+	chunksPerTenant = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "loki_ingester_chunks_stored_total",
+		Help: "Total stored chunks per tenant.",
+	}, []string{"tenant"})
+	chunkSizePerTenant = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "loki_ingester_chunk_stored_bytes_total",
+		Help: "Total bytes stored in chunks per tenant.",
+	}, []string{"tenant"})
+	chunkAge = promauto.NewHistogram(prometheus.HistogramOpts{
+		Name: "loki_ingester_chunk_age_seconds",
+		Help: "Distribution of chunk ages (when stored).",
+		// with default settings chunks should flush between 5 min and 12 hours
+		// so buckets at 1min, 5min, 10min, 30min, 1hr, 2hr, 4hr, 10hr, 12hr, 16hr
+		Buckets: []float64{60, 300, 600, 1800, 3600, 7200, 14400, 36000, 43200, 57600},
+	})
+)
+
 const (
 	// Backoff for retrying 'immediate' flushes. Only counts for queue
 	// position, not wallclock time.
@@ -154,7 +184,7 @@ func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediat
 	return nil
 }

-func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelPair) {
+func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelAdapter) {
 	instance.streamsMtx.Lock()
 	defer instance.streamsMtx.Unlock()

@@ -204,18 +234,18 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream) {

 	if len(stream.chunks) == 0 {
 		delete(instance.streams, stream.fp)
-		instance.index.Delete(client.FromLabelPairsToLabels(stream.labels), stream.fp)
+		instance.index.Delete(client.FromLabelAdaptersToLabels(stream.labels), stream.fp)
 		instance.streamsRemovedTotal.Inc()
 	}
 }

-func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelPair, cs []*chunkDesc) error {
+func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelAdapter, cs []*chunkDesc) error {
 	userID, err := user.ExtractOrgID(ctx)
 	if err != nil {
 		return err
 	}

-	metric := fromLabelPairs(labelPairs)
+	metric := client.FromLabelAdaptersToMetric(labelPairs)
 	metric[nameLabel] = logsValue

 	wireChunks := make([]chunk.Chunk, 0, len(cs))
@@ -234,13 +264,27 @@ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelP
 		wireChunks = append(wireChunks, c)
 	}

-	return i.store.Put(ctx, wireChunks)
-}
+	if err := i.store.Put(ctx, wireChunks); err != nil {
+		return err
+	}
+
+	// Record statistics only when the actual put request did not return an error.
+	sizePerTenant := chunkSizePerTenant.WithLabelValues(userID)
+	countPerTenant := chunksPerTenant.WithLabelValues(userID)
+	for i, wc := range wireChunks {
+		numEntries := cs[i].chunk.Size()
+		byt, err := wc.Encoded()
+		if err != nil {
+			continue
+		}

-func fromLabelPairs(ls []client.LabelPair) model.Metric {
-	m := make(model.Metric, len(ls))
-	for _, l := range ls {
-		m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
+		chunkEntries.Observe(float64(numEntries))
+		chunkSize.Observe(float64(len(byt)))
+		sizePerTenant.Add(float64(len(byt)))
+		countPerTenant.Inc()
+		firstTime, _ := cs[i].chunk.Bounds()
+		chunkAge.Observe(time.Since(firstTime).Seconds())
 	}
-	return m
+
+	return nil
 }
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 502fee4c019c..009fe489523a 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -121,16 +121,16 @@ func (i *instance) Query(req *logproto.QueryRequest, queryServer logproto.Querie
 func (i *instance) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) {
 	var labels []string
 	if req.Values {
-		values := i.index.LabelValues(model.LabelName(req.Name))
+		values := i.index.LabelValues(req.Name)
 		labels = make([]string, len(values))
 		for i := 0; i < len(values); i++ {
-			labels[i] = string(values[i])
+			labels[i] = values[i]
 		}
 	} else {
 		names := i.index.LabelNames()
 		labels = make([]string, len(names))
 		for i := 0; i < len(names); i++ {
-			labels[i] = string(names[i])
+			labels[i] = names[i]
 		}
 	}
 	return &logproto.LabelResponse{
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index f2a6529ac202..93d8453c581d 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -47,7 +47,7 @@ type stream struct {
 	// Not thread-safe; assume accesses to this are locked by caller.
chunks []chunkDesc fp model.Fingerprint - labels []client.LabelPair + labels []client.LabelAdapter } type chunkDesc struct { @@ -58,7 +58,7 @@ type chunkDesc struct { lastUpdated time.Time } -func newStream(fp model.Fingerprint, labels []client.LabelPair) *stream { +func newStream(fp model.Fingerprint, labels []client.LabelAdapter) *stream { return &stream{ fp: fp, labels: labels, @@ -96,7 +96,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error { } if appendErr == chunkenc.ErrOutOfOrder { - return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelPairsToLabels(s.labels).String()) + return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelAdaptersToLabels(s.labels).String()) } return appendErr @@ -121,5 +121,5 @@ func (s *stream) Iterator(from, through time.Time, direction logproto.Direction) } } - return iter.NewNonOverlappingIterator(iterators, client.FromLabelPairsToLabels(s.labels).String()), nil + return iter.NewNonOverlappingIterator(iterators, client.FromLabelAdaptersToLabels(s.labels).String()), nil } diff --git a/pkg/iter/iterator.go b/pkg/iter/iterator.go index 6d55ade370a5..94ed63422bba 100644 --- a/pkg/iter/iterator.go +++ b/pkg/iter/iterator.go @@ -352,11 +352,15 @@ func (i *nonOverlappingIterator) Entry() logproto.Entry { } func (i *nonOverlappingIterator) Labels() string { - return i.labels + if i.labels != "" { + return i.labels + } + + return i.curr.Labels() } func (i *nonOverlappingIterator) Error() error { - return nil + return i.curr.Error() } func (i *nonOverlappingIterator) Close() error { diff --git a/pkg/logproto/dep.go b/pkg/logproto/dep.go index 68eb69c94481..34d7bc3d65d1 100644 --- a/pkg/logproto/dep.go +++ b/pkg/logproto/dep.go @@ -2,6 +2,6 @@ package logproto import ( // trick dep into including this, needed by the generated code. - _ "github.com/cortexproject/cortex/pkg/util/wire" + _ "github.com/cortexproject/cortex/pkg/chunk/storage" _ "github.com/gogo/protobuf/types" ) diff --git a/pkg/querier/store.go b/pkg/querier/store.go index 008f2cdb16ca..eaba978c8e3b 100644 --- a/pkg/querier/store.go +++ b/pkg/querier/store.go @@ -5,6 +5,7 @@ import ( "sort" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/opentracing/opentracing-go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -27,60 +28,79 @@ func (q Querier) queryStore(ctx context.Context, req *logproto.QueryRequest) ([] matchers = append(matchers, nameLabelMatcher) from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) - chunks, err := q.store.Get(ctx, from, through, matchers...) + chks, fetchers, err := q.store.GetChunkRefs(ctx, from, through, matchers...) 
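+	// Note that chks and fetchers hold only chunk references at this point,
+	// grouped by the store that produced them with a matching *chunk.Fetcher
+	// per group; no chunk data has been fetched from storage yet.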
 	if err != nil {
 		return nil, err
 	}

-	return partitionBySeriesChunks(req, chunks)
-}
-
-func partitionBySeriesChunks(req *logproto.QueryRequest, chunks []chunk.Chunk) ([]iter.EntryIterator, error) {
-	chunksByFp := map[model.Fingerprint][]chunk.Chunk{}
-	metricByFp := map[model.Fingerprint]model.Metric{}
-	for _, c := range chunks {
-		fp := c.Metric.Fingerprint()
-		chunksByFp[fp] = append(chunksByFp[fp], c)
-		delete(c.Metric, "__name__")
-		metricByFp[fp] = c.Metric
+	for i := range chks {
+		chks[i] = filterChunksByTime(from, through, chks[i])
 	}

-	iters := make([]iter.EntryIterator, 0, len(chunksByFp))
-	for fp := range chunksByFp {
-		iterators, err := partitionOverlappingChunks(req, metricByFp[fp].String(), chunksByFp[fp])
-		if err != nil {
-			return nil, err
-		}
-		iterator := iter.NewHeapIterator(iterators, req.Direction)
-		iters = append(iters, iterator)
+	chksBySeries := partitionBySeriesChunks(chks, fetchers)
+	// Make sure the initial chunks are loaded. This is not one chunk
+	// per series, but rather a chunk per non-overlapping iterator.
+	if err := loadFirstChunks(ctx, chksBySeries); err != nil {
+		return nil, err
 	}

-	return iters, nil
+	// Now that we have the first chunk for each series loaded,
+	// we can proceed to filter the series that don't match.
+	chksBySeries = filterSeriesByMatchers(chksBySeries, matchers)
+
+	return buildIterators(ctx, req, chksBySeries)
 }

-func partitionOverlappingChunks(req *logproto.QueryRequest, labels string, chunks []chunk.Chunk) ([]iter.EntryIterator, error) {
-	sort.Sort(byFrom(chunks))
+func filterChunksByTime(from, through model.Time, chunks []chunk.Chunk) []chunk.Chunk {
+	filtered := make([]chunk.Chunk, 0, len(chunks))
+	for _, chunk := range chunks {
+		if chunk.Through < from || through < chunk.From {
+			continue
+		}
+		filtered = append(filtered, chunk)
+	}
+	return filtered
+}

-	css := [][]chunk.Chunk{}
+func filterSeriesByMatchers(chks map[model.Fingerprint][][]chunkenc.LazyChunk, matchers []*labels.Matcher) map[model.Fingerprint][][]chunkenc.LazyChunk {
 outer:
-	for _, c := range chunks {
-		for i, cs := range css {
-			if cs[len(cs)-1].Through.Before(c.From) {
-				css[i] = append(css[i], c)
+	for fp, chunks := range chks {
+		for _, matcher := range matchers {
+			if !matcher.Matches(string(chunks[0][0].Chunk.Metric[model.LabelName(matcher.Name)])) {
+				delete(chks, fp)
 				continue outer
 			}
 		}
-		cs := make([]chunk.Chunk, 0, len(chunks)/(len(css)+1))
-		cs = append(cs, c)
-		css = append(css, cs)
 	}

-	result := make([]iter.EntryIterator, 0, len(css))
-	for i := range css {
-		iterators := make([]iter.EntryIterator, 0, len(css[i]))
-		for j := range css[i] {
-			lokiChunk := css[i][j].Data.(*chunkenc.Facade).LokiChunk()
-			iterator, err := lokiChunk.Iterator(req.Start, req.End, req.Direction)
+	return chks
+}
+
+func buildIterators(ctx context.Context, req *logproto.QueryRequest, chks map[model.Fingerprint][][]chunkenc.LazyChunk) ([]iter.EntryIterator, error) {
+	result := make([]iter.EntryIterator, 0, len(chks))
+	for _, chunks := range chks {
+		iterator, err := buildHeapIterator(ctx, req, chunks)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, iterator)
+	}
+
+	return result, nil
+}
+
+func buildHeapIterator(ctx context.Context, req *logproto.QueryRequest, chks [][]chunkenc.LazyChunk) (iter.EntryIterator, error) {
+	result := make([]iter.EntryIterator, 0, len(chks))
+
+	labels := chks[0][0].Chunk.Metric.String()
+
+	for i := range chks {
+		iterators := make([]iter.EntryIterator, 0, len(chks[i]))
+		for j := range chks[i] {
+			iterator, err := chks[i][j].Iterator(ctx, req.Start, req.End, req.Direction)
 			if err != nil {
 				return nil, err
 			}
@@ -92,13 +112,101 @@ outer:
 			}
 			iterators = append(iterators, iterator)
 		}
+
 		result = append(result, iter.NewNonOverlappingIterator(iterators, labels))
 	}
-	return result, nil
+
+	return iter.NewHeapIterator(result, req.Direction), nil
+}
+
+func loadFirstChunks(ctx context.Context, chks map[model.Fingerprint][][]chunkenc.LazyChunk) error {
+	sp, ctx := opentracing.StartSpanFromContext(ctx, "loadFirstChunks")
+	defer sp.Finish()
+
+	// If chunks span buckets, then we'll have different fetchers for each bucket.
+	chksByFetcher := map[*chunk.Fetcher][]*chunkenc.LazyChunk{}
+	for _, lchks := range chks {
+		for _, lchk := range lchks {
+			if len(lchk) == 0 {
+				continue
+			}
+			chksByFetcher[lchk[0].Fetcher] = append(chksByFetcher[lchk[0].Fetcher], &lchk[0])
+		}
+	}
+
+	errChan := make(chan error)
+	for fetcher, chunks := range chksByFetcher {
+		go func(fetcher *chunk.Fetcher, chunks []*chunkenc.LazyChunk) {
+			keys := make([]string, 0, len(chunks))
+			chks := make([]chunk.Chunk, 0, len(chunks))
+			for _, chk := range chunks {
+				keys = append(keys, chk.Chunk.ExternalKey())
+				chks = append(chks, chk.Chunk)
+			}
+			chks, err := fetcher.FetchChunks(ctx, chks, keys)
+			if err != nil {
+				errChan <- err
+				return
+			}
+
+			for i, chk := range chks {
+				chunks[i].Chunk = chk
+			}
+
+			errChan <- nil
+		}(fetcher, chunks)
+	}
+
+	var lastErr error
+	for i := 0; i < len(chksByFetcher); i++ {
+		if err := <-errChan; err != nil {
+			lastErr = err
+		}
+	}
+
+	return lastErr
 }

-type byFrom []chunk.Chunk
+func partitionBySeriesChunks(chunks [][]chunk.Chunk, fetchers []*chunk.Fetcher) map[model.Fingerprint][][]chunkenc.LazyChunk {
+	chunksByFp := map[model.Fingerprint][]chunkenc.LazyChunk{}
+	for i, chks := range chunks {
+		for _, c := range chks {
+			fp := c.Metric.Fingerprint()
+			chunksByFp[fp] = append(chunksByFp[fp], chunkenc.LazyChunk{Chunk: c, Fetcher: fetchers[i]})
+			delete(c.Metric, "__name__")
+		}
+	}
+
+	result := make(map[model.Fingerprint][][]chunkenc.LazyChunk, len(chunksByFp))

-func (b byFrom) Len() int           { return len(b) }
-func (b byFrom) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byFrom) Less(i, j int) bool { return b[i].From < b[j].From }
+	for fp, chks := range chunksByFp {
+		result[fp] = partitionOverlappingChunks(chks)
+	}
+
+	return result
+}
+
+// partitionOverlappingChunks splits the list of chunks into different non-overlapping lists.
+func partitionOverlappingChunks(chunks []chunkenc.LazyChunk) [][]chunkenc.LazyChunk {
+	sort.Slice(chunks, func(i, j int) bool {
+		return chunks[i].Chunk.From < chunks[j].Chunk.From
+	})
+
+	css := [][]chunkenc.LazyChunk{}
+outer:
+	for _, c := range chunks {
+		for i, cs := range css {
+			// If the chunk doesn't overlap with the current list, then add it to it.
+			if cs[len(cs)-1].Chunk.Through.Before(c.Chunk.From) {
+				css[i] = append(css[i], c)
+				continue outer
+			}
+		}
+		// If the chunk overlaps with every existing list, then create a new list.
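+		// The initial capacity is only a heuristic: assume the remaining
+		// chunks spread evenly over the existing lists plus this new one.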
+		cs := make([]chunkenc.LazyChunk, 0, len(chunks)/(len(css)+1))
+		cs = append(cs, c)
+		css = append(css, cs)
+	}
+
+	return css
+}
diff --git a/pkg/util/conv.go b/pkg/util/conv.go
index b44bed5ce77a..77dce97f3155 100644
--- a/pkg/util/conv.go
+++ b/pkg/util/conv.go
@@ -2,22 +2,21 @@ package util

 import (
 	"github.com/cortexproject/cortex/pkg/ingester/client"
-	"github.com/cortexproject/cortex/pkg/util/wire"
 	"github.com/grafana/loki/pkg/parser"
 )

 // ToClientLabels parses the labels and converts them to the Cortex type.
-func ToClientLabels(labels string) ([]client.LabelPair, error) {
+func ToClientLabels(labels string) ([]client.LabelAdapter, error) {
 	ls, err := parser.Labels(labels)
 	if err != nil {
 		return nil, err
 	}

-	pairs := make([]client.LabelPair, 0, len(ls))
+	pairs := make([]client.LabelAdapter, 0, len(ls))
 	for i := 0; i < len(ls); i++ {
-		pairs = append(pairs, client.LabelPair{
-			Name:  wire.Bytes(ls[i].Name),
-			Value: wire.Bytes(ls[i].Value),
+		pairs = append(pairs, client.LabelAdapter{
+			Name:  ls[i].Name,
+			Value: ls[i].Value,
 		})
 	}
 	return pairs, nil
diff --git a/production/helm/README.md b/production/helm/README.md
index 833a1325eeaa..59e5b39b123b 100644
--- a/production/helm/README.md
+++ b/production/helm/README.md
@@ -56,3 +56,41 @@ $ kubectl port-forward --namespace <YOUR-NAMESPACE> service/loki-grafana 3000:80

 Navigate to http://localhost:3000 and login with `admin` and the password output above.
 Then follow the [instructions for adding the loki datasource](/docs/usage.md), using the URL `http://loki:3100/`.
+
+## Run Loki behind HTTPS ingress
+
+If Loki and Promtail are deployed on different clusters you can add an Ingress in front of Loki.
+By adding a certificate you create an HTTPS endpoint. For extra security, enable basic authentication on the Ingress.
+
+In Promtail, set the following values to communicate over HTTPS with basic authentication:
+
+```
+loki:
+  serviceScheme: https
+  user: user
+  password: pass
+```
+
+Sample Helm template for the Ingress:
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: {{ .Values.ingress.class }}
+    ingress.kubernetes.io/auth-type: "basic"
+    ingress.kubernetes.io/auth-secret: {{ .Values.ingress.basic.secret }}
+  name: loki
+spec:
+  rules:
+  - host: {{ .Values.ingress.host }}
+    http:
+      paths:
+      - backend:
+          serviceName: loki
+          servicePort: 3100
+  tls:
+  - secretName: {{ .Values.ingress.cert }}
+    hosts:
+    - {{ .Values.ingress.host }}
+```
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index e6cc11d36875..842628d995fe 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -1,5 +1,5 @@
 name: loki
-version: 0.7.0
+version: 0.7.1
 appVersion: 0.0.1
 kubeVersion: "^1.10.0-0"
 description: "Loki: like Prometheus, but for logs."
diff --git a/production/helm/loki/templates/deployment.yaml b/production/helm/loki/templates/deployment.yaml
index 915cb17545d1..8f3257eff545 100644
--- a/production/helm/loki/templates/deployment.yaml
+++ b/production/helm/loki/templates/deployment.yaml
@@ -27,6 +27,9 @@ spec:
         app: {{ template "loki.name" . }}
         name: {{ template "loki.name" . }}
         release: {{ .Release.Name }}
+        {{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
       annotations:
         checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") .
| sha256sum }} {{- with .Values.podAnnotations }} @@ -45,6 +48,9 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-config.file=/etc/loki/loki.yaml" + {{- range $key, $value := .Values.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} volumeMounts: - name: config mountPath: /etc/loki @@ -63,6 +69,9 @@ spec: {{- toYaml .Values.resources | nindent 12 }} securityContext: readOnlyRootFilesystem: true + env: + - name: JAEGER_AGENT_HOST + value: "{{ .Values.tracing.jaegerAgentHost }}" nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} affinity: diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 262578358223..560a374062f7 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -14,8 +14,13 @@ affinity: {} ## Deployment annotations annotations: {} +# enable tracing for debug, need install jaeger and specify right jaeger_agent_host +tracing: + jaegerAgentHost: + config: auth_enabled: false + ingester: chunk_idle_period: 15m lifecycler: @@ -58,6 +63,10 @@ image: tag: latest pullPolicy: Always # Always pull while in BETA +## Additional Loki container arguments, e.g. log level (debug, info, warn, error) +extraArgs: {} + # log.level: debug + livenessProbe: httpGet: path: /ready @@ -87,6 +96,9 @@ persistence: # subPath: "" # existingClaim: +## Pod Labels +podLabels: {} + ## Pod Annotations podAnnotations: {} # prometheus.io/scrape: "true" @@ -123,7 +135,7 @@ securityContext: service: type: ClusterIP - nodePort: + nodePort: port: 3100 annotations: {} labels: {} diff --git a/production/helm/promtail/Chart.yaml b/production/helm/promtail/Chart.yaml index 97c5129a8731..3431acd32698 100644 --- a/production/helm/promtail/Chart.yaml +++ b/production/helm/promtail/Chart.yaml @@ -1,5 +1,5 @@ name: promtail -version: 0.7.1 +version: 0.6.4 appVersion: 0.0.1 kubeVersion: "^1.10.0-0" description: "Responsible for gathering logs and sending them to Loki" diff --git a/production/helm/promtail/templates/daemonset.yaml b/production/helm/promtail/templates/daemonset.yaml index d008d0803ba0..a82db61252df 100644 --- a/production/helm/promtail/templates/daemonset.yaml +++ b/production/helm/promtail/templates/daemonset.yaml @@ -24,6 +24,9 @@ spec: labels: app: {{ template "promtail.name" . }} release: {{ .Release.Name }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} annotations: {{ toYaml .Values.podAnnotations | nindent 8 }} spec: @@ -37,7 +40,11 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-config.file=/etc/promtail/promtail.yaml" - - "-client.url=http://{{ include "loki.serviceName" . }}:{{ .Values.loki.servicePort }}/api/prom/push" + {{- if and .Values.loki.user .Values.loki.password }} + - "-client.url={{ .Values.loki.serviceScheme }}://{{ .Values.loki.user }}:{{ .Values.loki.password }}@{{ include "loki.serviceName" . }}:{{ .Values.loki.servicePort }}/api/prom/push" + {{- else }} + - "-client.url={{ .Values.loki.serviceScheme }}://{{ include "loki.serviceName" . 
}}:{{ .Values.loki.servicePort }}/api/prom/push" + {{- end }} volumeMounts: - name: config mountPath: /etc/promtail diff --git a/production/helm/promtail/values.yaml b/production/helm/promtail/values.yaml index dbaa598666b2..9740189806c9 100644 --- a/production/helm/promtail/values.yaml +++ b/production/helm/promtail/values.yaml @@ -18,6 +18,9 @@ livenessProbe: {} loki: serviceName: "" # Defaults to "${RELEASE}-loki" if not set servicePort: 3100 + serviceScheme: http + # user: user + # password: pass nameOverride: promtail @@ -25,6 +28,9 @@ nameOverride: promtail ## ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} +## Pod Labels +podLabels: {} + podAnnotations: {} # prometheus.io/scrape: "true" # prometheus.io/port: "http-metrics" @@ -111,6 +117,9 @@ config: http_listen_port: 3101 positions: filename: /run/promtail/positions.yaml + target_config: + # Period to resync directories being watched and files being tailed + sync_period: 10s scrape_configs: - entry_parser: '{{ .Values.entryParser }}' job_name: kubernetes-pods-name diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go index 4643ed7897e4..42a1a637ce4b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go @@ -44,7 +44,7 @@ func newAWSAutoscale(cfg DynamoDBConfig, callManager callManager) (*awsAutoscale return nil, err } return &awsAutoscale{ - call: callManager, + call: callManager, ApplicationAutoScaling: applicationautoscaling.New(session), }, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index ff913c934e62..b3a82eb960ca 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" @@ -19,6 +20,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" "github.com/weaveworks/common/httpgrpc" @@ -57,25 +59,21 @@ type StoreConfig struct { ChunkCacheConfig cache.Config WriteDedupeCacheConfig cache.Config - MinChunkAge time.Duration - CardinalityCacheSize int - CardinalityCacheValidity time.Duration - CardinalityLimit int - + MinChunkAge time.Duration CacheLookupsOlderThan time.Duration } // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f) - cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. 
", f) f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.") - f.IntVar(&cfg.CardinalityCacheSize, "store.cardinality-cache-size", 0, "Size of in-memory cardinality cache, 0 to disable.") - f.DurationVar(&cfg.CardinalityCacheValidity, "store.cardinality-cache-validity", 1*time.Hour, "Period for which entries in the cardinality cache are valid.") - f.IntVar(&cfg.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.") f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.") + + // Deprecated. + flagext.DeprecatedFlag(f, "store.cardinality-cache-size", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.size instead.") + flagext.DeprecatedFlag(f, "store.cardinality-cache-validity", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.duration instead.") } // store implements Store @@ -191,6 +189,10 @@ func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers . return c.getMetricNameChunks(ctx, from, through, matchers, metricName) } +func (c *store) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { + return nil, nil, errors.New("not implemented") +} + func (c *store) validateQuery(ctx context.Context, from model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { log, ctx := spanlogger.New(ctx, "store.validateQuery") defer log.Span.Finish() @@ -206,7 +208,7 @@ func (c *store) validateQuery(ctx context.Context, from model.Time, through *mod maxQueryLength := c.limits.MaxQueryLength(userID) if maxQueryLength > 0 && (*through).Sub(from) > maxQueryLength { - return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, length > limit (%s > %s)", (*through).Sub(from), maxQueryLength) + return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(from), maxQueryLength) } now := model.Now() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index cdfad5a5a743..bd3d21612f1b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -14,7 +14,8 @@ import ( type Store interface { Put(ctx context.Context, chunks []Chunk) error PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error - Get(tx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) + Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) + GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) Stop() } @@ -45,7 +46,7 @@ func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index var store Store var err error switch cfg.Schema { - case "v9": + case "v9", "v10": store, err = newSeriesStore(storeCfg, schema, index, chunks, limits) default: store, err = newStore(storeCfg, schema, index, chunks, limits) @@ -88,6 +89,22 @@ func (c compositeStore) Get(ctx context.Context, from, through model.Time, match return results, err } +func (c compositeStore) GetChunkRefs(ctx context.Context, from, through model.Time, 
matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { + chunkIDs := [][]Chunk{} + fetchers := []*Fetcher{} + err := c.forStores(from, through, func(from, through model.Time, store Store) error { + ids, fetcher, err := store.GetChunkRefs(ctx, from, through, matchers...) + if err != nil { + return err + } + + chunkIDs = append(chunkIDs, ids...) + fetchers = append(fetchers, fetcher...) + return nil + }) + return chunkIDs, fetchers, err +} + func (c compositeStore) Stop() { for _, store := range c.stores { store.Stop() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go index 145564e564cc..0282c08e9976 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go @@ -53,15 +53,18 @@ func (b *bigchunk) Add(sample model.SamplePair) ([]Chunk, error) { func (b *bigchunk) addNextChunk(start model.Time) error { // To save memory, we "compact" the previous chunk - the array backing the slice // will be upto 2x too big, and we can save this space. + const chunkCapacityExcess = 32 // don't bother copying if it's within this range if l := len(b.chunks); l > 0 { - c := b.chunks[l-1].XORChunk - buf := make([]byte, len(c.Bytes())) - copy(buf, c.Bytes()) - compacted, err := chunkenc.FromData(chunkenc.EncXOR, buf) - if err != nil { - return err + oldBuf := b.chunks[l-1].XORChunk.Bytes() + if cap(oldBuf) > len(oldBuf)+chunkCapacityExcess { + buf := make([]byte, len(oldBuf)) + copy(buf, oldBuf) + compacted, err := chunkenc.FromData(chunkenc.EncXOR, buf) + if err != nil { + return err + } + b.chunks[l-1].XORChunk = compacted.(*chunkenc.XORChunk) } - b.chunks[l-1].XORChunk = compacted.(*chunkenc.XORChunk) } chunk := chunkenc.NewXORChunk() @@ -110,7 +113,7 @@ func (b *bigchunk) UnmarshalFromBuf(buf []byte) error { return err } - b.chunks = make([]smallChunk, 0, numChunks) + b.chunks = make([]smallChunk, 0, numChunks+1) // allow one extra space in case we want to add new data for i := uint16(0); i < numChunks; i++ { chunkLen, err := r.ReadUint16() if err != nil { @@ -158,8 +161,9 @@ func (b *bigchunk) Len() int { } func (b *bigchunk) Size() int { - sum := 0 + sum := 2 // For the number of sub chunks. for _, c := range b.chunks { + sum += 2 // For the length of the sub chunk. 
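+		// ...plus the encoded bytes of the sub chunk's data.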
sum += len(c.Bytes()) } return sum diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go index b6dc21a71fec..9c6898d23fae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go @@ -8,23 +8,23 @@ import ( // BenchmarkMetric is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated var BenchmarkMetric = model.Metric{ - model.MetricNameLabel: "container_cpu_usage_seconds_total", - "beta_kubernetes_io_arch": "amd64", - "beta_kubernetes_io_instance_type": "c3.somesize", - "beta_kubernetes_io_os": "linux", - "container_name": "some-name", - "cpu": "cpu01", + model.MetricNameLabel: "container_cpu_usage_seconds_total", + "beta_kubernetes_io_arch": "amd64", + "beta_kubernetes_io_instance_type": "c3.somesize", + "beta_kubernetes_io_os": "linux", + "container_name": "some-name", + "cpu": "cpu01", "failure_domain_beta_kubernetes_io_region": "somewhere-1", "failure_domain_beta_kubernetes_io_zone": "somewhere-1b", - "id": "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28", - "image": "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506", - "instance": "ip-111-11-1-11.ec2.internal", - "job": "kubernetes-cadvisor", - "kubernetes_io_hostname": "ip-111-11-1-11", - "monitor": "prod", - "name": "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0", - "namespace": "kube-system", - "pod_name": "some-other-name-5j8s8", + "id": "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28", + "image": "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506", + "instance": "ip-111-11-1-11.ec2.internal", + "job": "kubernetes-cadvisor", + "kubernetes_io_hostname": "ip-111-11-1-11", + "monitor": "prod", + "name": "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0", + "namespace": "kube-system", + "pod_name": "some-other-name-5j8s8", } // DefaultSchemaConfig creates a simple schema config for testing diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go index a43980bde1ac..adf155f6c5e5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go @@ -114,7 +114,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun decodeContext := chunk.NewDecodeContext() var processingErr error - var recievedChunks = 0 + var receivedChunks = 0 // rows are returned in key order, not order in row list err := table.ReadRows(ctx, page, func(row bigtable.Row) bool { @@ -130,7 +130,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun return false } - recievedChunks++ + receivedChunks++ outs <- chunk return true }) @@ -139,8 +139,8 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun errs <- processingErr } else if err != nil { errs <- errors.WithStack(err) - } else if recievedChunks < len(page) { - errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), recievedChunks)) + } else if 
receivedChunks < len(page) { + errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), receivedChunks)) } }(page) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index 37033156fd5f..307f4c1d6030 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -1,6 +1,7 @@ package chunk import ( + "encoding/binary" "errors" "fmt" "strings" @@ -540,7 +541,7 @@ func (v6Entries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { type v9Entries struct { } -func (e v9Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v9Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } @@ -630,3 +631,108 @@ func (v9Entries) GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuer }, }, nil } + +// v10Entries builds on v9 by sharding index rows to reduce their size. +type v10Entries struct { + rowShards uint32 +} + +func (v10Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { + return nil, ErrNotSupported +} + +func (s v10Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { + seriesID := sha256bytes(labels.String()) + + // read first 32 bits of the hash and use this to calculate the shard + shard := binary.BigEndian.Uint32(seriesID) % s.rowShards + + entries := []IndexEntry{ + // Entry for metricName -> seriesID + { + TableName: bucket.tableName, + HashValue: fmt.Sprintf("%02d:%s:%s", shard, bucket.hashKey, string(metricName)), + RangeValue: encodeRangeKey(seriesID, nil, nil, seriesRangeKeyV1), + }, + } + + // Entries for metricName:labelName -> hash(value):seriesID + // We use a hash of the value to limit its length. 
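+	// The raw value is still stored in the entry's Value field, so reads can
+	// compare values exactly (see GetReadMetricLabelValueQueries below).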
+ for key, value := range labels { + if key == model.MetricNameLabel { + continue + } + valueHash := sha256bytes(string(value)) + entries = append(entries, IndexEntry{ + TableName: bucket.tableName, + HashValue: fmt.Sprintf("%02d:%s:%s:%s", shard, bucket.hashKey, metricName, key), + RangeValue: encodeRangeKey(valueHash, seriesID, nil, labelSeriesRangeKeyV1), + Value: []byte(value), + }) + } + + return entries, nil +} + +func (v10Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { + seriesID := sha256bytes(labels.String()) + encodedThroughBytes := encodeTime(bucket.through) + + entries := []IndexEntry{ + // Entry for seriesID -> chunkID + { + TableName: bucket.tableName, + HashValue: bucket.hashKey + ":" + string(seriesID), + RangeValue: encodeRangeKey(encodedThroughBytes, nil, []byte(chunkID), chunkTimeRangeKeyV3), + }, + } + + return entries, nil +} + +func (s v10Entries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { + result := make([]IndexQuery, 0, s.rowShards) + for i := uint32(0); i < s.rowShards; i++ { + result = append(result, IndexQuery{ + TableName: bucket.tableName, + HashValue: fmt.Sprintf("%02d:%s:%s", i, bucket.hashKey, string(metricName)), + }) + } + return result, nil +} + +func (s v10Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { + result := make([]IndexQuery, 0, s.rowShards) + for i := uint32(0); i < s.rowShards; i++ { + result = append(result, IndexQuery{ + TableName: bucket.tableName, + HashValue: fmt.Sprintf("%02d:%s:%s:%s", i, bucket.hashKey, metricName, labelName), + }) + } + return result, nil +} + +func (s v10Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { + valueHash := sha256bytes(string(labelValue)) + result := make([]IndexQuery, 0, s.rowShards) + for i := uint32(0); i < s.rowShards; i++ { + result = append(result, IndexQuery{ + TableName: bucket.tableName, + HashValue: fmt.Sprintf("%02d:%s:%s:%s", i, bucket.hashKey, metricName, labelName), + RangeValueStart: encodeRangeKey(valueHash), + ValueEqual: []byte(labelValue), + }) + } + return result, nil +} + +func (v10Entries) GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) { + encodedFromBytes := encodeTime(bucket.from) + return []IndexQuery{ + { + TableName: bucket.tableName, + HashValue: bucket.hashKey + ":" + string(seriesID), + RangeValueStart: encodeRangeKey(encodedFromBytes), + }, + }, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 1fa5db0cca7a..ec01b8415d00 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -3,17 +3,17 @@ package chunk import ( "flag" "fmt" - "github.com/go-kit/kit/log/level" "os" "strconv" "time" + "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" + "github.com/weaveworks/common/mtime" yaml "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/weaveworks/common/mtime" ) const ( @@ -32,6 +32,7 @@ type PeriodConfig struct { Schema string `yaml:"schema"` IndexTables PeriodicTableConfig `yaml:"index"` ChunkTables PeriodicTableConfig 
`yaml:"chunks,omitempty"` + RowShards uint32 `yaml:"row_shards"` } // SchemaConfig contains the config for our chunk index schemas @@ -181,6 +182,15 @@ func (cfg PeriodConfig) createSchema() Schema { s = schema{cfg.dailyBuckets, v6Entries{}} case "v9": s = schema{cfg.dailyBuckets, v9Entries{}} + case "v10": + rowShards := uint32(16) + if cfg.RowShards > 0 { + rowShards = cfg.RowShards + } + + s = schema{cfg.dailyBuckets, v10Entries{ + rowShards: rowShards, + }} } return s } @@ -424,7 +434,7 @@ func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg Pr // ChunkTableFor calculates the chunk table shard for a given point in time. func (cfg SchemaConfig) ChunkTableFor(t model.Time) (string, error) { for i := range cfg.Configs { - if t > cfg.Configs[i].From && (i+1 == len(cfg.Configs) || t < cfg.Configs[i+1].From) { + if t >= cfg.Configs[i].From && (i+1 == len(cfg.Configs) || t < cfg.Configs[i+1].From) { return cfg.Configs[i].ChunkTables.TableFor(t), nil } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index a6c837c35afd..4a0b742e6af2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -23,7 +23,9 @@ import ( ) var ( - errCardinalityExceeded = errors.New("cardinality limit exceeded") + // ErrCardinalityExceeded is returned when the user reads a row that + // is too large. + ErrCardinalityExceeded = errors.New("cardinality limit exceeded") indexLookupsPerQuery = promauto.NewHistogram(prometheus.HistogramOpts{ Namespace: "cortex", @@ -57,8 +59,6 @@ var ( // seriesStore implements Store type seriesStore struct { store - cardinalityCache *cache.FifoCache - writeDedupeCache cache.Cache } @@ -89,10 +89,6 @@ func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Ob limits: limits, Fetcher: fetcher, }, - cardinalityCache: cache.NewFifoCache("cardinality", cache.FifoCacheConfig{ - Size: cfg.CardinalityCacheSize, - Validity: cfg.CardinalityCacheValidity, - }), writeDedupeCache: writeDedupeCache, }, nil } @@ -108,38 +104,17 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc return nil, err } - // Validate the query is within reasonable bounds. - metricName, matchers, shortcut, err := c.validateQuery(ctx, from, &through, allMatchers) - if err != nil { - return nil, err - } else if shortcut { - return nil, nil - } - - level.Debug(log).Log("metric", metricName) - - // Fetch the series IDs from the index, based on non-empty matchers from - // the query. - _, matchers = util.SplitFiltersAndMatchers(matchers) - seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, metricName, matchers) + chks, _, err := c.GetChunkRefs(ctx, from, through, allMatchers...) if err != nil { return nil, err } - level.Debug(log).Log("series-ids", len(seriesIDs)) - // Lookup the series in the index to get the chunks. - chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, seriesIDs) - if err != nil { - level.Error(log).Log("msg", "lookupChunksBySeries", "err", err) - return nil, err + if len(chks) == 0 { + // Shortcut + return nil, nil } - level.Debug(log).Log("chunk-ids", len(chunkIDs)) - chunks, err := c.convertChunkIDsToChunks(ctx, chunkIDs) - if err != nil { - level.Error(log).Log("err", "convertChunkIDsToChunks", "err", err) - return nil, err - } + chunks := chks[0] // Filter out chunks that are not in the selected time range. 
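+	// (The index lookup is bucketed, so GetChunkRefs can return chunk refs
+	// that lie outside the requested range.)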
 	filtered, keys := filterChunksByTime(from, through, chunks)
 	level.Debug(log).Log("chunks-post-filtering", len(chunks))

@@ -147,8 +122,8 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc

 	// Protect ourselves against OOMing.
 	maxChunksPerQuery := c.limits.MaxChunksPerQuery(userID)
-	if maxChunksPerQuery > 0 && len(chunkIDs) > maxChunksPerQuery {
-		err := httpgrpc.Errorf(http.StatusBadRequest, "Query %v fetched too many chunks (%d > %d)", allMatchers, len(chunkIDs), maxChunksPerQuery)
+	if maxChunksPerQuery > 0 && len(chunks) > maxChunksPerQuery {
+		err := httpgrpc.Errorf(http.StatusBadRequest, "Query %v fetched too many chunks (%d > %d)", allMatchers, len(chunks), maxChunksPerQuery)
 		level.Error(log).Log("err", err)
 		return nil, err
 	}
@@ -165,6 +140,46 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc
 	return filteredChunks, nil
 }

+func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) {
+	log, ctx := spanlogger.New(ctx, "SeriesStore.GetChunkRefs")
+	defer log.Span.Finish()
+
+	// Validate the query is within reasonable bounds.
+	metricName, matchers, shortcut, err := c.validateQuery(ctx, from, &through, allMatchers)
+	if err != nil {
+		return nil, nil, err
+	} else if shortcut {
+		return nil, nil, nil
+	}
+
+	level.Debug(log).Log("metric", metricName)
+
+	// Fetch the series IDs from the index, based on non-empty matchers from
+	// the query.
+	_, matchers = util.SplitFiltersAndMatchers(matchers)
+	seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, metricName, matchers)
+	if err != nil {
+		return nil, nil, err
+	}
+	level.Debug(log).Log("series-ids", len(seriesIDs))
+
+	// Lookup the series in the index to get the chunks.
+	chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, seriesIDs)
+	if err != nil {
+		level.Error(log).Log("msg", "lookupChunksBySeries", "err", err)
+		return nil, nil, err
+	}
+	level.Debug(log).Log("chunk-ids", len(chunkIDs))
+
+	chunks, err := c.convertChunkIDsToChunks(ctx, chunkIDs)
+	if err != nil {
+		level.Error(log).Log("op", "convertChunkIDsToChunks", "err", err)
+		return nil, nil, err
+	}
+
+	return [][]Chunk{chunks}, []*Fetcher{c.store.Fetcher}, nil
+}
+
 func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from, through model.Time, metricName string, matchers []*labels.Matcher) ([]string, error) {
 	log, ctx := spanlogger.New(ctx, "SeriesStore.lookupSeriesByMetricNameMatchers", "metricName", metricName, "matchers", len(matchers))
 	defer log.Span.Finish()
@@ -210,15 +225,21 @@ func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from
 			ids = intersectStrings(ids, incoming)
 		}
 	case err := <-incomingErrors:
-		if err == errCardinalityExceeded {
+		// The idea is that if we have 2 matchers, and one returns a lot of
+		// series while the other returns only a few (say 10), we don't look up
+		// the large one at all. We just manually filter through the few series
+		// again using "filterChunksByMatchers", saving us from looking up and
+		// intersecting a lot of series.
+		if err == ErrCardinalityExceeded {
 			cardinalityExceededErrors++
 		} else {
 			lastErr = err
 		}
 	}
 	}

+	// But if every single matcher returns a lot of series, then it makes sense to abort the query.
if cardinalityExceededErrors == len(matchers) { - return nil, errCardinalityExceeded + return nil, ErrCardinalityExceeded } else if lastErr != nil { return nil, lastErr } @@ -251,36 +272,12 @@ func (c *seriesStore) lookupSeriesByMetricNameMatcher(ctx context.Context, from, } level.Debug(log).Log("queries", len(queries)) - for _, query := range queries { - value, ok := c.cardinalityCache.Get(ctx, query.HashValue) - if !ok { - continue - } - cardinality := value.(int) - if cardinality > c.cfg.CardinalityLimit { - return nil, errCardinalityExceeded - } - } - entries, err := c.lookupEntriesByQueries(ctx, queries) if err != nil { return nil, err } level.Debug(log).Log("entries", len(entries)) - // TODO This is not correct, will overcount for queries > 24hrs - keys := make([]string, 0, len(queries)) - values := make([]interface{}, 0, len(queries)) - for _, query := range queries { - keys = append(keys, query.HashValue) - values = append(values, len(entries)) - } - c.cardinalityCache.Put(ctx, keys, values) - - if len(entries) > c.cfg.CardinalityLimit { - return nil, errCardinalityExceeded - } - ids, err := c.parseIndexEntries(ctx, entries, matcher) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go similarity index 97% rename from vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go rename to vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go index dfabadd8e061..c4804995ff82 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go @@ -1,4 +1,4 @@ -package wire +package storage import ( "bytes" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go index cc36d5cb27d8..1469e92d75ed 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go @@ -3,6 +3,9 @@ package storage import ( "time" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/validation" + "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/chunk/gcp" @@ -16,11 +19,15 @@ type fixture struct { func (f fixture) Name() string { return "caching-store" } func (f fixture) Clients() (chunk.IndexClient, chunk.ObjectClient, chunk.TableClient, chunk.SchemaConfig, error) { + limits, err := defaultLimits() + if err != nil { + return nil, nil, nil, chunk.SchemaConfig{}, err + } indexClient, objectClient, tableClient, schemaConfig, err := f.fixture.Clients() indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ Size: 500, Validity: 5 * time.Minute, - }), 5*time.Minute) + }), 5*time.Minute, limits) return indexClient, objectClient, tableClient, schemaConfig, err } func (f fixture) Teardown() error { return f.fixture.Teardown() } @@ -29,3 +36,9 @@ func (f fixture) Teardown() error { return f.fixture.Teardown() } var Fixtures = []testutils.Fixture{ fixture{gcp.Fixtures[0]}, } + +func defaultLimits() (*validation.Overrides, error) { + var defaults validation.Limits + flagext.DefaultValues(&defaults) + return validation.NewOverrides(defaults) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index fcb9a02204a1..c4df850b885b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -5,15 +5,18 @@ import ( "sync" "time" + "github.com/go-kit/kit/log/level" + proto "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/go-kit/kit/log/level" - proto "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/cortexproject/cortex/pkg/util/validation" ) var ( @@ -43,9 +46,10 @@ type cachingIndexClient struct { chunk.IndexClient cache cache.Cache validity time.Duration + limits *validation.Overrides } -func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration) chunk.IndexClient { +func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits *validation.Overrides) chunk.IndexClient { if c == nil { return client } @@ -54,6 +58,7 @@ func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity tim IndexClient: client, cache: cache.NewSnappy(c), validity: validity, + limits: limits, } } @@ -65,6 +70,12 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind // We cache the entire row, so filter client side. callback = chunk_util.QueryFilter(callback) + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return err + } + cardinalityLimit := int32(s.limits.CardinalityLimit(userID)) + // Build list of keys to lookup in the cache. 
keys := make([]string, 0, len(queries)) queriesByKey := make(map[string][]chunk.IndexQuery, len(queries)) @@ -76,6 +87,10 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind batches, misses := s.cacheFetch(ctx, keys) for _, batch := range batches { + if cardinalityLimit > 0 && batch.Cardinality > cardinalityLimit { + return chunk.ErrCardinalityExceeded + } + queries := queriesByKey[batch.Key] for _, query := range queries { callback(query, batch) @@ -115,7 +130,7 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind results[key] = rb } - err := s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool { + err = s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool { resultsMtx.Lock() defer resultsMtx.Unlock() key := queryKey(cacheableQuery) @@ -135,9 +150,20 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind defer resultsMtx.Unlock() keys := make([]string, 0, len(results)) batches := make([]ReadBatch, 0, len(results)) + var cardinalityErr error for key, batch := range results { + cardinality := int32(len(batch.Entries)) + if cardinalityLimit > 0 && cardinality > cardinalityLimit { + batch.Cardinality = cardinality + batch.Entries = nil + cardinalityErr = chunk.ErrCardinalityExceeded + } + keys = append(keys, key) batches = append(batches, batch) + if cardinalityErr != nil { + continue + } queries := queriesByKey[key] for _, query := range queries { @@ -145,8 +171,8 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind } } s.cacheStore(ctx, keys, batches) + return cardinalityErr } - return nil } // Iterator implements chunk.ReadBatch. @@ -250,7 +276,6 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat } if readBatch.Expiry != 0 && time.Now().After(time.Unix(0, readBatch.Expiry)) { - level.Debug(log).Log("msg", "dropping index cache entry due to expiration", "key", key, "readBatch.Key", readBatch.Key, "expiry", time.Unix(0, readBatch.Expiry)) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go index 7aa411fe2cd1..761e22f4b80a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go @@ -3,17 +3,15 @@ package storage -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire" - -import strings "strings" -import reflect "reflect" - -import io "io" +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
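The two `cardinalityLimit` checks added to `QueryPages` above amount to a small negative-caching scheme: when an index row comes back over the limit, the client caches a tombstone that keeps the entry count but drops the entries themselves, so the next identical query fails fast from cache instead of re-reading the index. A condensed sketch of that invariant, with a hypothetical helper name (`ReadBatch` and `chunk.ErrCardinalityExceeded` are the types from this diff):

```go
package storage

import "github.com/cortexproject/cortex/pkg/chunk"

// overLimit turns an oversized batch into a cacheable tombstone: the
// Cardinality count survives, the payload does not, and the caller
// surfaces ErrCardinalityExceeded either now (on store) or on the next
// cache hit (when batch.Cardinality is non-zero).
func overLimit(batch ReadBatch, limit int32) (ReadBatch, error) {
	if n := int32(len(batch.Entries)); limit > 0 && n > limit {
		batch.Cardinality = n
		batch.Entries = nil
		return batch, chunk.ErrCardinalityExceeded
	}
	return batch, nil
}
```

This is also why the limit is now read from `validation.Overrides` per user ID rather than the removed global `cfg.CardinalityLimit`: the tombstone is keyed per query, while the threshold can differ per tenant.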
var _ = proto.Marshal @@ -27,14 +25,14 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Entry struct { - Column github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Column"` - Value github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Value"` + Column Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=Bytes" json:"Column"` + Value Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=Bytes" json:"Value"` } func (m *Entry) Reset() { *m = Entry{} } func (*Entry) ProtoMessage() {} func (*Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{0} + return fileDescriptor_a60039d4a2d816f6, []int{0} } func (m *Entry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -51,8 +49,8 @@ func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Entry.Merge(dst, src) +func (m *Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entry.Merge(m, src) } func (m *Entry) XXX_Size() int { return m.Size() @@ -64,16 +62,19 @@ func (m *Entry) XXX_DiscardUnknown() { var xxx_messageInfo_Entry proto.InternalMessageInfo type ReadBatch struct { - Entries []Entry `protobuf:"bytes,1,rep,name=entries" json:"entries"` + Entries []Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // The time at which the key expires. Expiry int64 `protobuf:"varint,3,opt,name=expiry,proto3" json:"expiry,omitempty"` + // The number of entries; used for cardinality limiting. + // entries will be empty when this is set. 
+ Cardinality int32 `protobuf:"varint,4,opt,name=cardinality,proto3" json:"cardinality,omitempty"` } func (m *ReadBatch) Reset() { *m = ReadBatch{} } func (*ReadBatch) ProtoMessage() {} func (*ReadBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{1} + return fileDescriptor_a60039d4a2d816f6, []int{1} } func (m *ReadBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -90,8 +91,8 @@ func (m *ReadBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *ReadBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadBatch.Merge(dst, src) +func (m *ReadBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadBatch.Merge(m, src) } func (m *ReadBatch) XXX_Size() int { return m.Size() @@ -123,10 +124,47 @@ func (m *ReadBatch) GetExpiry() int64 { return 0 } +func (m *ReadBatch) GetCardinality() int32 { + if m != nil { + return m.Cardinality + } + return 0 +} + func init() { proto.RegisterType((*Entry)(nil), "storage.Entry") proto.RegisterType((*ReadBatch)(nil), "storage.ReadBatch") } + +func init() { + proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_a60039d4a2d816f6) +} + +var fileDescriptor_a60039d4a2d816f6 = []byte{ + // 335 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, + 0x00, 0x44, 0x63, 0xd2, 0xa4, 0xaa, 0x0b, 0x08, 0x65, 0x40, 0x11, 0x83, 0x1b, 0x15, 0x21, 0x65, + 0x21, 0x91, 0x80, 0x2f, 0x08, 0x62, 0x63, 0x0a, 0x12, 0x6b, 0xe5, 0xba, 0x26, 0x31, 0x4d, 0xed, + 0xc8, 0x75, 0x50, 0xb3, 0xb1, 0xb1, 0xf2, 0x19, 0x7c, 0x4a, 0xc7, 0x8e, 0x15, 0x43, 0x45, 0xdd, + 0x85, 0xb1, 0x9f, 0x80, 0x6a, 0x82, 0xd4, 0x81, 0xed, 0x9e, 0xef, 0x7c, 0x67, 0x19, 0xde, 0x67, + 0x4c, 0xe5, 0xd5, 0x30, 0x22, 0x62, 0x12, 0x13, 0x21, 0x15, 0x9d, 0x95, 0x52, 0x3c, 0x53, 0xa2, + 0x1a, 0x8a, 0xcb, 0x71, 0x16, 0x93, 0xbc, 0xe2, 0xe3, 0x78, 0xaa, 0x84, 0xc4, 0x19, 0x8d, 0x09, + 0x26, 0x39, 0xe3, 0xd9, 0x80, 0xf1, 0x11, 0x9d, 0x0d, 0x48, 0xc1, 0x28, 0x57, 0x51, 0x29, 0x85, + 0x12, 0x5e, 0xbb, 0xc9, 0x9c, 0x5d, 0xee, 0xd5, 0x66, 0x22, 0x13, 0xb1, 0xf1, 0x87, 0xd5, 0x93, + 0x21, 0x03, 0x46, 0xfd, 0xde, 0xeb, 0x3f, 0x40, 0xe7, 0x8e, 0x2b, 0x59, 0x7b, 0x17, 0xd0, 0xbd, + 0x15, 0x45, 0x35, 0xe1, 0x3e, 0x08, 0x40, 0x78, 0x98, 0x1c, 0xcd, 0x57, 0x3d, 0xeb, 0x73, 0xd5, + 0x73, 0x92, 0x5a, 0xd1, 0x69, 0xea, 0x12, 0x63, 0x7a, 0xe7, 0xd0, 0x79, 0xc4, 0x45, 0x45, 0xfd, + 0x83, 0xff, 0x52, 0xce, 0xcb, 0xce, 0xeb, 0xbf, 0x01, 0xd8, 0x49, 0x29, 0x1e, 0x25, 0x58, 0x91, + 0xdc, 0x8b, 0x60, 0x9b, 0x72, 0x25, 0x19, 0x9d, 0xfa, 0x20, 0xb0, 0xc3, 0xee, 0xd5, 0x71, 0xd4, + 0x3c, 0x36, 0x32, 0xd3, 0x49, 0x6b, 0x57, 0x92, 0xfe, 0x85, 0xbc, 0x13, 0x68, 0x8f, 0x69, 0x6d, + 0x06, 0x3a, 0xe9, 0x4e, 0x7a, 0xa7, 0xd0, 0xa5, 0xb3, 0x92, 0xc9, 0xda, 0xb7, 0x03, 0x10, 0xda, + 0x69, 0x43, 0x5e, 0x00, 0xbb, 0x04, 0xcb, 0x11, 0xe3, 0xb8, 0x60, 0xaa, 0xf6, 0x5b, 0x01, 0x08, + 0x9d, 0x74, 0xff, 0x28, 0xb9, 0x59, 0xac, 0x91, 0xb5, 0x5c, 0x23, 0x6b, 0xbb, 0x46, 0xe0, 0x55, + 0x23, 0xf0, 0xa1, 0x11, 0x98, 0x6b, 0x04, 0x16, 0x1a, 0x81, 0x2f, 0x8d, 0xc0, 0xb7, 0x46, 0xd6, + 0x56, 0x23, 0xf0, 0xbe, 0x41, 0xd6, 0x62, 0x83, 0xac, 0xe5, 0x06, 0x59, 0x43, 0xd7, 0xfc, 0xcd, + 0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xd2, 0x5d, 0xd9, 0xa3, 0x01, 0x00, 0x00, +} + func (this *Entry) Equal(that interface{}) bool { if that == nil { return this == nil @@ -187,6 +225,9 @@ func (this *ReadBatch) Equal(that 
interface{}) bool { if this.Expiry != that1.Expiry { return false } + if this.Cardinality != that1.Cardinality { + return false + } return true } func (this *Entry) GoString() string { @@ -204,7 +245,7 @@ func (this *ReadBatch) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&storage.ReadBatch{") if this.Entries != nil { vs := make([]*Entry, len(this.Entries)) @@ -215,6 +256,7 @@ func (this *ReadBatch) GoString() string { } s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") s = append(s, "Expiry: "+fmt.Sprintf("%#v", this.Expiry)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -244,17 +286,17 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Column.Size())) - n1, err := m.Column.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + n1, err1 := m.Column.MarshalTo(dAtA[i:]) + if err1 != nil { + return 0, err1 } i += n1 dAtA[i] = 0x12 i++ i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Value.Size())) - n2, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + n2, err2 := m.Value.MarshalTo(dAtA[i:]) + if err2 != nil { + return 0, err2 } i += n2 return i, nil @@ -298,6 +340,11 @@ func (m *ReadBatch) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Expiry)) } + if m.Cardinality != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality)) + } return i, nil } @@ -342,6 +389,9 @@ func (m *ReadBatch) Size() (n int) { if m.Expiry != 0 { n += 1 + sovCachingIndexClient(uint64(m.Expiry)) } + if m.Cardinality != 0 { + n += 1 + sovCachingIndexClient(uint64(m.Cardinality)) + } return n } @@ -373,10 +423,16 @@ func (this *ReadBatch) String() string { if this == nil { return "nil" } + repeatedStringForEntries := "[]Entry{" + for _, f := range this.Entries { + repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + "," + } + repeatedStringForEntries += "}" s := strings.Join([]string{`&ReadBatch{`, - `Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "Entry", "Entry", 1), `&`, ``, 1) + `,`, + `Entries:` + repeatedStringForEntries + `,`, `Key:` + fmt.Sprintf("%v", this.Key) + `,`, `Expiry:` + fmt.Sprintf("%v", this.Expiry) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, `}`, }, "") return s @@ -404,7 +460,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -432,7 +488,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -441,6 +497,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCachingIndexClient } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCachingIndexClient + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -462,7 +521,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -471,6 +530,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCachingIndexClient } postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthCachingIndexClient + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -487,6 +549,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCachingIndexClient } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCachingIndexClient + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -514,7 +579,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -542,7 +607,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -551,6 +616,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCachingIndexClient } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCachingIndexClient + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -573,7 +641,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +651,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCachingIndexClient } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachingIndexClient + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -602,7 +673,26 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Expiry |= (int64(b) & 0x7F) << shift + m.Expiry |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachingIndexClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -616,6 +706,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCachingIndexClient } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCachingIndexClient + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -682,10 +775,13 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthCachingIndexClient } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthCachingIndexClient + } return iNdEx, nil case 3: for { @@ -714,6 +810,9 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthCachingIndexClient + } } return iNdEx, nil case 4: @@ -732,32 +831,3 @@ var ( ErrInvalidLengthCachingIndexClient = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowCachingIndexClient = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_caching_index_client_2f4bf220288f700f) -} - -var fileDescriptor_caching_index_client_2f4bf220288f700f = []byte{ - // 331 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x90, 0xb1, 0x4e, 0xeb, 0x30, - 0x14, 0x86, 0xe3, 0x9b, 0xdb, 0x54, 0x35, 0x08, 0xa1, 0x0c, 0x28, 0x62, 0x70, 0xab, 0x4e, 0x5d, - 0x88, 0x25, 0xca, 0xc6, 0x16, 0xc4, 0xc6, 
0x42, 0x90, 0x58, 0xab, 0xd4, 0x3d, 0x24, 0xa6, 0xa9, - 0x1d, 0xb9, 0x0e, 0x34, 0x1b, 0x8f, 0xc0, 0x63, 0xb0, 0xf1, 0x1a, 0x1d, 0x3b, 0x56, 0x0c, 0x15, - 0x75, 0x17, 0xc6, 0x3e, 0x02, 0xaa, 0x09, 0x12, 0x23, 0x12, 0xdb, 0xf9, 0xe4, 0xe3, 0xcf, 0xbf, - 0x7f, 0x7c, 0x95, 0x72, 0x9d, 0x95, 0xc3, 0x90, 0xc9, 0x09, 0x65, 0x52, 0x69, 0x98, 0x15, 0x4a, - 0xde, 0x03, 0xd3, 0x35, 0xd1, 0x62, 0x9c, 0x52, 0x96, 0x95, 0x62, 0x4c, 0xa7, 0x5a, 0xaa, 0x24, - 0x05, 0xca, 0x12, 0x96, 0x71, 0x91, 0x0e, 0xb8, 0x18, 0xc1, 0x6c, 0xc0, 0x72, 0x0e, 0x42, 0x87, - 0x85, 0x92, 0x5a, 0xfa, 0xcd, 0x7a, 0xe7, 0xf8, 0xe4, 0x87, 0x36, 0x95, 0xa9, 0xa4, 0xf6, 0x7c, - 0x58, 0xde, 0x59, 0xb2, 0x60, 0xa7, 0xaf, 0x7b, 0xdd, 0x57, 0x84, 0x1b, 0x97, 0x42, 0xab, 0xca, - 0xbf, 0xc1, 0xde, 0x85, 0xcc, 0xcb, 0x89, 0x08, 0x50, 0x07, 0xf5, 0xf6, 0xa3, 0xf3, 0xf9, 0xaa, - 0xed, 0xbc, 0xad, 0xda, 0xfd, 0xdf, 0xe4, 0x2c, 0x35, 0xcf, 0xe9, 0x23, 0x57, 0x10, 0x46, 0x95, - 0x86, 0x69, 0xec, 0x31, 0xab, 0xf2, 0xaf, 0x71, 0xe3, 0x36, 0xc9, 0x4b, 0x08, 0xfe, 0xfd, 0xdd, - 0xd9, 0x78, 0xd8, 0x99, 0xba, 0x80, 0x5b, 0x31, 0x24, 0xa3, 0x28, 0xd1, 0x2c, 0xf3, 0x43, 0xdc, - 0x04, 0xa1, 0x15, 0x87, 0x69, 0x80, 0x3a, 0x6e, 0x6f, 0xef, 0xf4, 0x20, 0xac, 0x8b, 0x08, 0xed, - 0xaf, 0xa2, 0xff, 0xbb, 0x17, 0xe3, 0xef, 0x25, 0xff, 0x10, 0xbb, 0x63, 0xa8, 0x6c, 0x9a, 0x56, - 0xbc, 0x1b, 0xfd, 0x23, 0xec, 0xc1, 0xac, 0xe0, 0xaa, 0x0a, 0xdc, 0x0e, 0xea, 0xb9, 0x71, 0x4d, - 0xd1, 0xd9, 0x62, 0x4d, 0x9c, 0xe5, 0x9a, 0x38, 0xdb, 0x35, 0x41, 0x4f, 0x86, 0xa0, 0x17, 0x43, - 0xd0, 0xdc, 0x10, 0xb4, 0x30, 0x04, 0xbd, 0x1b, 0x82, 0x3e, 0x0c, 0x71, 0xb6, 0x86, 0xa0, 0xe7, - 0x0d, 0x71, 0x16, 0x1b, 0xe2, 0x2c, 0x37, 0xc4, 0x19, 0x7a, 0xb6, 0xd5, 0xfe, 0x67, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x95, 0x6d, 0x6d, 0xd0, 0xdd, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto index 1c22c94c8ab5..22a9d01ffaff 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto @@ -8,8 +8,8 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message Entry { - bytes Column = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false]; - bytes Value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false]; + bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; + bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false]; } message ReadBatch { @@ -18,4 +18,8 @@ message ReadBatch { // The time at which the key expires. int64 expiry = 3; + + // The number of entries; used for cardinality limiting. + // entries will be empty when this is set. 
+ int32 cardinality = 4; } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index d45ed09de02e..a0fd1b41406a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -98,7 +98,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf if err != nil { return nil, errors.Wrap(err, "error creating index client") } - index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity) + index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity, limits) objectStoreType := s.ObjectType if objectStoreType == "" { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go index 91bc56a0e70a..d6cfb69f0e22 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go @@ -72,8 +72,8 @@ func CreateChunks(startIndex, batchSize int, start model.Time) ([]string, []chun func dummyChunk(now model.Time) chunk.Chunk { return dummyChunkFor(now, model.Metric{ model.MetricNameLabel: "foo", - "bar": "baz", - "toms": "code", + "bar": "baz", + "toms": "code", }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 764cf708949b..54679185f1c5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -27,6 +27,7 @@ var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.Histogra type HealthAndIngesterClient interface { IngesterClient grpc_health_v1.HealthClient + Close() error } type closableHealthAndIngesterClient struct { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go index f177eefabf31..f95d1e27877b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go @@ -1,11 +1,11 @@ package client import ( - "bytes" stdjson "encoding/json" "fmt" "sort" "strconv" + "strings" "time" "unsafe" @@ -16,22 +16,6 @@ import ( var json = jsoniter.ConfigCompatibleWithStandardLibrary -// FromWriteRequest converts a WriteRequest proto into an array of samples. -func FromWriteRequest(req *WriteRequest) []model.Sample { - // Just guess that there is one sample per timeseries - samples := make([]model.Sample, 0, len(req.Timeseries)) - for _, ts := range req.Timeseries { - for _, s := range ts.Samples { - samples = append(samples, model.Sample{ - Metric: FromLabelPairs(ts.Labels), - Value: model.SampleValue(s.Value), - Timestamp: model.Time(s.TimestampMs), - }) - } - } - return samples -} - // ToWriteRequest converts an array of samples into a WriteRequest proto. 
func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *WriteRequest { req := &WriteRequest{ @@ -42,7 +26,7 @@ func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *Wri for _, s := range samples { ts := PreallocTimeseries{ TimeSeries: TimeSeries{ - Labels: ToLabelPairs(s.Metric), + Labels: FromMetricsToLabelAdapters(s.Metric), Samples: []Sample{ { Value: float64(s.Value), @@ -87,7 +71,7 @@ func ToQueryResponse(matrix model.Matrix) *QueryResponse { resp := &QueryResponse{} for _, ss := range matrix { ts := TimeSeries{ - Labels: ToLabelPairs(ss.Metric), + Labels: FromMetricsToLabelAdapters(ss.Metric), Samples: make([]Sample, 0, len(ss.Values)), } for _, s := range ss.Values { @@ -106,7 +90,7 @@ func FromQueryResponse(resp *QueryResponse) model.Matrix { m := make(model.Matrix, 0, len(resp.Timeseries)) for _, ts := range resp.Timeseries { var ss model.SampleStream - ss.Metric = FromLabelPairs(ts.Labels) + ss.Metric = FromLabelAdaptersToMetric(ts.Labels) ss.Values = make([]model.SamplePair, 0, len(ts.Samples)) for _, s := range ts.Samples { ss.Values = append(ss.Values, model.SamplePair{ @@ -153,7 +137,7 @@ func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (mo func FromMetricsForLabelMatchersResponse(resp *MetricsForLabelMatchersResponse) []model.Metric { metrics := []model.Metric{} for _, m := range resp.Metric { - metrics = append(metrics, FromLabelPairs(m.Labels)) + metrics = append(metrics, FromLabelAdaptersToMetric(m.Labels)) } return metrics } @@ -208,70 +192,63 @@ func fromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) { return result, nil } -// ToLabelPairs builds a []LabelPair from a model.Metric -func ToLabelPairs(metric model.Metric) []LabelPair { - labelPairs := make([]LabelPair, 0, len(metric)) - for k, v := range metric { - labelPairs = append(labelPairs, LabelPair{ - Name: []byte(k), - Value: []byte(v), - }) - } - sort.Sort(byLabel(labelPairs)) // The labels should be sorted upon initialisation. - return labelPairs +// FromLabelAdaptersToLabels casts []LabelAdapter to labels.Labels. +// It uses unsafe, but as LabelAdapter == labels.Label this should be safe. +// This allows us to use labels.Labels directly in protos. +func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels { + return *(*labels.Labels)(unsafe.Pointer(&ls)) } -type byLabel []LabelPair - -func (s byLabel) Len() int { return len(s) } -func (s byLabel) Less(i, j int) bool { return bytes.Compare(s[i].Name, s[j].Name) < 0 } -func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// FromLabelPairs unpack a []LabelPair to a model.Metric -func FromLabelPairs(labelPairs []LabelPair) model.Metric { - metric := make(model.Metric, len(labelPairs)) - for _, l := range labelPairs { - metric[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } - return metric +// FromLabelsToLabelAdapaters casts labels.Labels to []LabelAdapter. +// It uses unsafe, but as LabelAdapter == labels.Label this should be safe. +// This allows us to use labels.Labels directly in protos. 
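The conversion layer in this hunk replaces the old copying `LabelPair` helpers with `LabelAdapter` casts that share memory in both directions. A small usage sketch under assumed import paths (`FromLabelsToLabelAdapaters` is spelled exactly as in the vendored code):

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	ls := labels.FromStrings("instance", "i-123", "job", "api")

	// Both casts reinterpret the same backing array: no copy, no allocation.
	adapters := client.FromLabelsToLabelAdapaters(ls)
	round := client.FromLabelAdaptersToLabels(adapters)

	fmt.Println(len(adapters), round) // 2 {instance="i-123", job="api"}
}
```

Because the two views alias the same memory, neither side may mutate the labels while the other still holds them; that is the price of using `labels.Labels` directly in protos without a copy, and it is why the slower `model.Metric` conversions below are marked as unsuitable for performance-sensitive paths.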
+func FromLabelsToLabelAdapaters(ls labels.Labels) []LabelAdapter { + return *(*[]LabelAdapter)(unsafe.Pointer(&ls)) } -// FromLabelPairsToLabels unpack a []LabelPair to a labels.Labels -func FromLabelPairsToLabels(labelPairs []LabelPair) labels.Labels { - ls := make(labels.Labels, 0, len(labelPairs)) - for _, l := range labelPairs { - ls = append(ls, labels.Label{ - Name: string(l.Name), - Value: string(l.Value), - }) +// FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric. +// Don't do this on any performance sensitive paths. +func FromLabelAdaptersToMetric(ls []LabelAdapter) model.Metric { + result := make(model.Metric, len(ls)) + for _, l := range ls { + result[model.LabelName(l.Name)] = model.LabelValue(l.Value) } - return ls + return result } -// FromLabelsToLabelPairs converts labels.Labels to []LabelPair -func FromLabelsToLabelPairs(s labels.Labels) []LabelPair { - labelPairs := make([]LabelPair, 0, len(s)) - for _, v := range s { - labelPairs = append(labelPairs, LabelPair{ - Name: []byte(v.Name), - Value: []byte(v.Value), +// FromMetricsToLabelAdapters converts model.Metric to []LabelAdapter. +// Don't do this on any performance sensitive paths. +// The result is sorted. +func FromMetricsToLabelAdapters(metric model.Metric) []LabelAdapter { + result := make([]LabelAdapter, 0, len(metric)) + for k, v := range metric { + result = append(result, LabelAdapter{ + Name: string(k), + Value: string(v), }) } - return labelPairs // note already sorted + sort.Sort(byLabel(result)) // The labels should be sorted upon initialisation. + return result } +type byLabel []LabelAdapter + +func (s byLabel) Len() int { return len(s) } +func (s byLabel) Less(i, j int) bool { return strings.Compare(s[i].Name, s[j].Name) < 0 } +func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + // FastFingerprint runs the same algorithm as Prometheus labelSetToFastFingerprint() -func FastFingerprint(labelPairs []LabelPair) model.Fingerprint { - if len(labelPairs) == 0 { +func FastFingerprint(ls []LabelAdapter) model.Fingerprint { + if len(ls) == 0 { return model.Metric(nil).FastFingerprint() } var result uint64 - for _, pair := range labelPairs { + for _, l := range ls { sum := hashNew() - sum = hashAdd(sum, pair.Name) + sum = hashAdd(sum, l.Name) sum = hashAddByte(sum, model.SeparatorByte) - sum = hashAdd(sum, pair.Value) + sum = hashAdd(sum, l.Value) result ^= sum } return model.Fingerprint(result) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index a7d5490231d1..b7a9689fb025 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -3,29 +3,21 @@ package client -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire" - -import strconv "strconv" - -import bytes "bytes" - -import strings "strings" -import reflect "reflect" - import ( - context "golang.org/x/net/context" + bytes "bytes" + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" + io "io" + math "math" + reflect "reflect" + strconv "strconv" + strings "strings" ) -import encoding_binary "encoding/binary" - -import io 
"io" - // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf @@ -52,6 +44,7 @@ var MatchType_name = map[int32]string{ 2: "REGEX_MATCH", 3: "REGEX_NO_MATCH", } + var MatchType_value = map[string]int32{ "EQUAL": 0, "NOT_EQUAL": 1, @@ -60,7 +53,7 @@ var MatchType_value = map[string]int32{ } func (MatchType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{0} + return fileDescriptor_db0f8a1e534b119a, []int{0} } type WriteRequest_SourceEnum int32 @@ -74,24 +67,25 @@ var WriteRequest_SourceEnum_name = map[int32]string{ 0: "API", 1: "RULE", } + var WriteRequest_SourceEnum_value = map[string]int32{ "API": 0, "RULE": 1, } func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{0, 0} + return fileDescriptor_db0f8a1e534b119a, []int{0, 0} } type WriteRequest struct { - Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,customtype=PreallocTimeseries" json:"timeseries"` + Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } func (*WriteRequest) ProtoMessage() {} func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{0} + return fileDescriptor_db0f8a1e534b119a, []int{0} } func (m *WriteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,8 +102,8 @@ func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(dst, src) +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) } func (m *WriteRequest) XXX_Size() int { return m.Size() @@ -133,7 +127,7 @@ type WriteResponse struct { func (m *WriteResponse) Reset() { *m = WriteResponse{} } func (*WriteResponse) ProtoMessage() {} func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{1} + return fileDescriptor_db0f8a1e534b119a, []int{1} } func (m *WriteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -150,8 +144,8 @@ func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (dst *WriteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteResponse.Merge(dst, src) +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) } func (m *WriteResponse) XXX_Size() int { return m.Size() @@ -163,13 +157,13 @@ func (m *WriteResponse) XXX_DiscardUnknown() { var xxx_messageInfo_WriteResponse proto.InternalMessageInfo type ReadRequest struct { - Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` + Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` } func (m *ReadRequest) Reset() { *m = ReadRequest{} } func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{2} + return fileDescriptor_db0f8a1e534b119a, []int{2} } func (m *ReadRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,8 +180,8 @@ func (m *ReadRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(dst, src) +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) } func (m *ReadRequest) XXX_Size() int { return m.Size() @@ -206,13 +200,13 @@ func (m *ReadRequest) GetQueries() []*QueryRequest { } type ReadResponse struct { - Results []*QueryResponse `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` + Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } func (m *ReadResponse) Reset() { *m = ReadResponse{} } func (*ReadResponse) ProtoMessage() {} func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{3} + return fileDescriptor_db0f8a1e534b119a, []int{3} } func (m *ReadResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -229,8 +223,8 @@ func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *ReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResponse.Merge(dst, src) +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) } func (m *ReadResponse) XXX_Size() int { return m.Size() @@ -251,13 +245,13 @@ func (m *ReadResponse) GetResults() []*QueryResponse { type QueryRequest struct { StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` } func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{4} + return fileDescriptor_db0f8a1e534b119a, []int{4} } func (m *QueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,8 +268,8 @@ func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *QueryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRequest.Merge(dst, src) +func (m *QueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRequest.Merge(m, src) } func (m *QueryRequest) XXX_Size() int { return m.Size() @@ -308,13 +302,13 @@ func (m *QueryRequest) GetMatchers() []*LabelMatcher { } type QueryResponse struct { - Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"` + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` } func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{5} + return fileDescriptor_db0f8a1e534b119a, []int{5} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,8 +325,8 @@ func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (dst *QueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResponse.Merge(dst, src) +func (m *QueryResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_QueryResponse.Merge(m, src) } func (m *QueryResponse) XXX_Size() int { return m.Size() @@ -352,13 +346,13 @@ func (m *QueryResponse) GetTimeseries() []TimeSeries { // QueryStreamResponse contains a batch of timeseries chunks. type QueryStreamResponse struct { - Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"` + Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` } func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } func (*QueryStreamResponse) ProtoMessage() {} func (*QueryStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{6} + return fileDescriptor_db0f8a1e534b119a, []int{6} } func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -375,8 +369,8 @@ func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *QueryStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryStreamResponse.Merge(dst, src) +func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStreamResponse.Merge(m, src) } func (m *QueryStreamResponse) XXX_Size() int { return m.Size() @@ -401,7 +395,7 @@ type LabelValuesRequest struct { func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } func (*LabelValuesRequest) ProtoMessage() {} func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{7} + return fileDescriptor_db0f8a1e534b119a, []int{7} } func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -418,8 +412,8 @@ func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *LabelValuesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesRequest.Merge(dst, src) +func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequest.Merge(m, src) } func (m *LabelValuesRequest) XXX_Size() int { return m.Size() @@ -438,13 +432,13 @@ func (m *LabelValuesRequest) GetLabelName() string { } type LabelValuesResponse struct { - LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"` + LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` } func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } func (*LabelValuesResponse) ProtoMessage() {} func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{8} + return fileDescriptor_db0f8a1e534b119a, []int{8} } func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -461,8 +455,8 @@ func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *LabelValuesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesResponse.Merge(dst, src) +func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponse.Merge(m, src) } func (m *LabelValuesResponse) XXX_Size() int { return m.Size() @@ -486,7 +480,7 @@ type LabelNamesRequest struct { func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } func (*LabelNamesRequest) ProtoMessage() {} func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{9} + return fileDescriptor_db0f8a1e534b119a, []int{9} } func 
(m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -503,8 +497,8 @@ func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (dst *LabelNamesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesRequest.Merge(dst, src) +func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequest.Merge(m, src) } func (m *LabelNamesRequest) XXX_Size() int { return m.Size() @@ -516,13 +510,13 @@ func (m *LabelNamesRequest) XXX_DiscardUnknown() { var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo type LabelNamesResponse struct { - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames" json:"label_names,omitempty"` + LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` } func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } func (*LabelNamesResponse) ProtoMessage() {} func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{10} + return fileDescriptor_db0f8a1e534b119a, []int{10} } func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -539,8 +533,8 @@ func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *LabelNamesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesResponse.Merge(dst, src) +func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponse.Merge(m, src) } func (m *LabelNamesResponse) XXX_Size() int { return m.Size() @@ -564,7 +558,7 @@ type UserStatsRequest struct { func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } func (*UserStatsRequest) ProtoMessage() {} func (*UserStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{11} + return fileDescriptor_db0f8a1e534b119a, []int{11} } func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,8 +575,8 @@ func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (dst *UserStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsRequest.Merge(dst, src) +func (m *UserStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsRequest.Merge(m, src) } func (m *UserStatsRequest) XXX_Size() int { return m.Size() @@ -603,7 +597,7 @@ type UserStatsResponse struct { func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } func (*UserStatsResponse) ProtoMessage() {} func (*UserStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{12} + return fileDescriptor_db0f8a1e534b119a, []int{12} } func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -620,8 +614,8 @@ func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (dst *UserStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsResponse.Merge(dst, src) +func (m *UserStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsResponse.Merge(m, src) } func (m *UserStatsResponse) XXX_Size() int { return m.Size() @@ -662,13 +656,13 @@ func (m *UserStatsResponse) GetRuleIngestionRate() float64 { type UserIDStatsResponse struct { UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Data *UserStatsResponse 
`protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } func (*UserIDStatsResponse) ProtoMessage() {} func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{13} + return fileDescriptor_db0f8a1e534b119a, []int{13} } func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -685,8 +679,8 @@ func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *UserIDStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserIDStatsResponse.Merge(dst, src) +func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserIDStatsResponse.Merge(m, src) } func (m *UserIDStatsResponse) XXX_Size() int { return m.Size() @@ -712,13 +706,13 @@ func (m *UserIDStatsResponse) GetData() *UserStatsResponse { } type UsersStatsResponse struct { - Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats" json:"stats,omitempty"` + Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` } func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } func (*UsersStatsResponse) ProtoMessage() {} func (*UsersStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{14} + return fileDescriptor_db0f8a1e534b119a, []int{14} } func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -735,8 +729,8 @@ func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (dst *UsersStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsersStatsResponse.Merge(dst, src) +func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsersStatsResponse.Merge(m, src) } func (m *UsersStatsResponse) XXX_Size() int { return m.Size() @@ -757,13 +751,13 @@ func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { type MetricsForLabelMatchersRequest struct { StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet" json:"matchers_set,omitempty"` + MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` } func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } func (*MetricsForLabelMatchersRequest) ProtoMessage() {} func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{15} + return fileDescriptor_db0f8a1e534b119a, []int{15} } func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,8 +774,8 @@ func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic boo return b[:n], nil } } -func (dst *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(dst, src) +func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) } func (m *MetricsForLabelMatchersRequest) 
XXX_Size() int { return m.Size() @@ -814,13 +808,13 @@ func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { } type MetricsForLabelMatchersResponse struct { - Metric []*Metric `protobuf:"bytes,1,rep,name=metric" json:"metric,omitempty"` + Metric []*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` } func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } func (*MetricsForLabelMatchersResponse) ProtoMessage() {} func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{16} + return fileDescriptor_db0f8a1e534b119a, []int{16} } func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -837,8 +831,8 @@ func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bo return b[:n], nil } } -func (dst *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(dst, src) +func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) } func (m *MetricsForLabelMatchersResponse) XXX_Size() int { return m.Size() @@ -857,16 +851,16 @@ func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric { } type TimeSeriesChunk struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Labels []LabelPair `protobuf:"bytes,3,rep,name=labels" json:"labels"` - Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks" json:"chunks"` + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` + Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` } func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } func (*TimeSeriesChunk) ProtoMessage() {} func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{17} + return fileDescriptor_db0f8a1e534b119a, []int{17} } func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -883,8 +877,8 @@ func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return b[:n], nil } } -func (dst *TimeSeriesChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesChunk.Merge(dst, src) +func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesChunk.Merge(m, src) } func (m *TimeSeriesChunk) XXX_Size() int { return m.Size() @@ -909,13 +903,6 @@ func (m *TimeSeriesChunk) GetUserId() string { return "" } -func (m *TimeSeriesChunk) GetLabels() []LabelPair { - if m != nil { - return m.Labels - } - return nil -} - func (m *TimeSeriesChunk) GetChunks() []Chunk { if m != nil { return m.Chunks @@ -933,7 +920,7 @@ type Chunk struct { func (m *Chunk) Reset() { *m = Chunk{} } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{18} + return fileDescriptor_db0f8a1e534b119a, []int{18} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -950,8 +937,8 @@ func (m 
*Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Chunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chunk.Merge(dst, src) +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) } func (m *Chunk) XXX_Size() int { return m.Size() @@ -996,7 +983,7 @@ type TransferChunksResponse struct { func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } func (*TransferChunksResponse) ProtoMessage() {} func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{19} + return fileDescriptor_db0f8a1e534b119a, []int{19} } func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1013,8 +1000,8 @@ func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } -func (dst *TransferChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferChunksResponse.Merge(dst, src) +func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferChunksResponse.Merge(m, src) } func (m *TransferChunksResponse) XXX_Size() int { return m.Size() @@ -1026,15 +1013,15 @@ func (m *TransferChunksResponse) XXX_DiscardUnknown() { var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo type TimeSeries struct { - Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"` + Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` // Sorted by time, oldest sample first. - Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{20} + return fileDescriptor_db0f8a1e534b119a, []int{20} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1051,8 +1038,8 @@ func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(dst, src) +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) } func (m *TimeSeries) XXX_Size() int { return m.Size() @@ -1063,13 +1050,6 @@ func (m *TimeSeries) XXX_DiscardUnknown() { var xxx_messageInfo_TimeSeries proto.InternalMessageInfo -func (m *TimeSeries) GetLabels() []LabelPair { - if m != nil { - return m.Labels - } - return nil -} - func (m *TimeSeries) GetSamples() []Sample { if m != nil { return m.Samples @@ -1078,14 +1058,14 @@ func (m *TimeSeries) GetSamples() []Sample { } type LabelPair struct { - Name github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=name,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"name"` - Value github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"value"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{21} + 
return fileDescriptor_db0f8a1e534b119a, []int{21} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1102,8 +1082,8 @@ func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) } func (m *LabelPair) XXX_Size() int { return m.Size() @@ -1114,6 +1094,20 @@ func (m *LabelPair) XXX_DiscardUnknown() { var xxx_messageInfo_LabelPair proto.InternalMessageInfo +func (m *LabelPair) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LabelPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + type Sample struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` @@ -1122,7 +1116,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{22} + return fileDescriptor_db0f8a1e534b119a, []int{22} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1139,8 +1133,8 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(dst, src) +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) } func (m *Sample) XXX_Size() int { return m.Size() @@ -1166,13 +1160,13 @@ func (m *Sample) GetTimestampMs() int64 { } type LabelMatchers struct { - Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` } func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } func (*LabelMatchers) ProtoMessage() {} func (*LabelMatchers) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{23} + return fileDescriptor_db0f8a1e534b119a, []int{23} } func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1189,8 +1183,8 @@ func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (dst *LabelMatchers) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatchers.Merge(dst, src) +func (m *LabelMatchers) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatchers.Merge(m, src) } func (m *LabelMatchers) XXX_Size() int { return m.Size() @@ -1209,13 +1203,13 @@ func (m *LabelMatchers) GetMatchers() []*LabelMatcher { } type Metric struct { - Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"` + Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` } func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{24} + return fileDescriptor_db0f8a1e534b119a, []int{24} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,8 +1226,8 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) 
+func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) } func (m *Metric) XXX_Size() int { return m.Size() @@ -1244,13 +1238,6 @@ func (m *Metric) XXX_DiscardUnknown() { var xxx_messageInfo_Metric proto.InternalMessageInfo -func (m *Metric) GetLabels() []LabelPair { - if m != nil { - return m.Labels - } - return nil -} - type LabelMatcher struct { Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` @@ -1260,7 +1247,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_cortex_dc30309a17c87a98, []int{25} + return fileDescriptor_db0f8a1e534b119a, []int{25} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1277,8 +1264,8 @@ func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *LabelMatcher) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatcher.Merge(dst, src) +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) } func (m *LabelMatcher) XXX_Size() int { return m.Size() @@ -1311,6 +1298,8 @@ func (m *LabelMatcher) GetValue() string { } func init() { + proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) + proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest") proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse") proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest") @@ -1337,9 +1326,93 @@ func init() { proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") proto.RegisterType((*Metric)(nil), "cortex.Metric") proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") - proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) - proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) } + +func init() { + proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_db0f8a1e534b119a) +} + +var fileDescriptor_db0f8a1e534b119a = []byte{ + // 1231 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xdf, 0x8d, 0x7f, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0x92, 0x7e, 0x9b, 0xba, 0xfa, 0x6e, 0xca, + 0x48, 0x2d, 0x11, 0x50, 0xa7, 0xa4, 0x2a, 0xf4, 0x40, 0x55, 0x9c, 0x36, 0x6d, 0x8d, 0x92, 0x34, + 0x1d, 0xbb, 0x80, 0x90, 0xd0, 0x6a, 0x63, 0x4f, 0x9d, 0xa5, 0xfb, 0xc3, 0x9d, 0x99, 0x45, 0xf4, + 0x80, 0xc4, 0x7f, 0x00, 0x47, 0xf8, 0x0f, 0x38, 0x73, 0x81, 0x33, 0xa7, 0x1e, 0x7b, 0xac, 0x38, + 0x54, 0xd4, 0xbd, 0x70, 0xec, 0x9f, 0x80, 0x76, 0x66, 0x76, 0xbd, 0xeb, 0xda, 0xa2, 0x02, 0xf5, + 0xe6, 0x79, 0xef, 0xf3, 0x3e, 0xf3, 0xe6, 0xfd, 0x5c, 0xc3, 0xc7, 0x43, 0x57, 0x1c, 0x47, 0x47, + 0xad, 0x7e, 0xe8, 0x6f, 0xf5, 0x43, 0x26, 0xe8, 0x37, 0x23, 0x16, 0x7e, 0x45, 0xfb, 0x42, 0x9f, + 0xb6, 0x46, 0x0f, 0x86, 0x5b, 0x6e, 0x30, 0xa4, 0x5c, 0x50, 0xb6, 0xd5, 0xf7, 0x5c, 0x1a, 0x24, + 0xaa, 0xd6, 0x88, 0x85, 0x22, 0x44, 0x65, 0x75, 0x6a, 0x5e, 0xc8, 0x30, 0x0d, 0xc3, 0x61, 0xb8, + 0x25, 0xd5, 0x47, 0xd1, 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x4b, 0x99, 0xe1, 0xdf, 0x4c, 0xa8, 0x7d, + 0xc6, 0x5c, 0x41, 0x09, 
0x7d, 0x18, 0x51, 0x2e, 0xd0, 0x01, 0x80, 0x70, 0x7d, 0xca, 0x29, 0x73, + 0x29, 0x5f, 0x37, 0xcf, 0x16, 0x36, 0xab, 0xdb, 0xa8, 0xa5, 0xaf, 0xea, 0xb9, 0x3e, 0xed, 0x4a, + 0xcd, 0x4e, 0xf3, 0xf1, 0xb3, 0x0d, 0xe3, 0x8f, 0x67, 0x1b, 0xe8, 0x90, 0x51, 0xc7, 0xf3, 0xc2, + 0x7e, 0x2f, 0xb5, 0x22, 0x19, 0x06, 0xf4, 0x21, 0x94, 0xbb, 0x61, 0xc4, 0xfa, 0x74, 0x7d, 0xe1, + 0xac, 0xb9, 0x59, 0xdf, 0xde, 0x48, 0xb8, 0xb2, 0xb7, 0xb6, 0x14, 0x64, 0x37, 0x88, 0x7c, 0x52, + 0xe6, 0xf2, 0x37, 0xde, 0x00, 0x98, 0x48, 0xd1, 0x22, 0x14, 0xda, 0x87, 0x9d, 0x86, 0x81, 0x96, + 0xa0, 0x48, 0xee, 0xed, 0xed, 0x36, 0x4c, 0x7c, 0x02, 0x96, 0x35, 0x07, 0x1f, 0x85, 0x01, 0xa7, + 0xf8, 0x2a, 0x54, 0x09, 0x75, 0x06, 0xc9, 0x4b, 0x5a, 0xb0, 0xf8, 0x30, 0xca, 0x3e, 0x63, 0x2d, + 0xb9, 0xfa, 0x6e, 0x44, 0xd9, 0x23, 0x0d, 0x23, 0x09, 0x08, 0x5f, 0x83, 0x9a, 0x32, 0x57, 0x74, + 0x68, 0x0b, 0x16, 0x19, 0xe5, 0x91, 0x27, 0x12, 0xfb, 0x93, 0x53, 0xf6, 0x0a, 0x47, 0x12, 0x14, + 0xfe, 0xd1, 0x84, 0x5a, 0x96, 0x1a, 0xbd, 0x07, 0x88, 0x0b, 0x87, 0x09, 0x5b, 0xc6, 0x43, 0x38, + 0xfe, 0xc8, 0xf6, 0x63, 0x32, 0x73, 0xb3, 0x40, 0x1a, 0x52, 0xd3, 0x4b, 0x14, 0xfb, 0x1c, 0x6d, + 0x42, 0x83, 0x06, 0x83, 0x3c, 0x76, 0x41, 0x62, 0xeb, 0x34, 0x18, 0x64, 0x91, 0x17, 0x61, 0xc9, + 0x77, 0x44, 0xff, 0x98, 0x32, 0xbe, 0x5e, 0xc8, 0x3f, 0x6d, 0xcf, 0x39, 0xa2, 0xde, 0xbe, 0x52, + 0x92, 0x14, 0x85, 0x3b, 0xb0, 0x9c, 0x73, 0x1a, 0x5d, 0x79, 0xcd, 0x34, 0x17, 0xe3, 0x34, 0x67, + 0x13, 0x8a, 0x7b, 0xb0, 0x2a, 0xa9, 0xba, 0x82, 0x51, 0xc7, 0x4f, 0x09, 0xaf, 0xce, 0x20, 0x3c, + 0xf5, 0x2a, 0xe1, 0xf5, 0xe3, 0x28, 0x78, 0x30, 0x83, 0xf5, 0x12, 0x20, 0xe9, 0xfa, 0xa7, 0x8e, + 0x17, 0x51, 0x9e, 0x04, 0xf0, 0xff, 0x00, 0x5e, 0x2c, 0xb5, 0x03, 0xc7, 0xa7, 0x32, 0x70, 0x15, + 0x52, 0x91, 0x92, 0x03, 0xc7, 0xa7, 0xf8, 0x0a, 0xac, 0xe6, 0x8c, 0xb4, 0x2b, 0x6f, 0x41, 0x4d, + 0x59, 0x7d, 0x2d, 0xe5, 0xd2, 0x99, 0x0a, 0xa9, 0x7a, 0x13, 0x28, 0x5e, 0x85, 0x95, 0xbd, 0x84, + 0x26, 0xb9, 0x0d, 0x5f, 0xd6, 0x3e, 0x68, 0xa1, 0x66, 0xdb, 0x80, 0xea, 0xc4, 0x87, 0x84, 0x0c, + 0x52, 0x27, 0x38, 0x46, 0xd0, 0xb8, 0xc7, 0x29, 0xeb, 0x0a, 0x47, 0xa4, 0x54, 0xbf, 0x9a, 0xb0, + 0x92, 0x11, 0x6a, 0xaa, 0x73, 0x50, 0x57, 0x3d, 0xec, 0x86, 0x81, 0xcd, 0x1c, 0xa1, 0x9e, 0x64, + 0x92, 0xe5, 0x54, 0x4a, 0x1c, 0x41, 0xe3, 0x57, 0x07, 0x91, 0x6f, 0xeb, 0x50, 0xc6, 0x25, 0x50, + 0x24, 0x95, 0x20, 0xf2, 0x55, 0x04, 0xe3, 0xaa, 0x72, 0x46, 0xae, 0x3d, 0xc5, 0x54, 0x90, 0x4c, + 0x0d, 0x67, 0xe4, 0x76, 0x72, 0x64, 0x2d, 0x58, 0x65, 0x91, 0x47, 0xa7, 0xe1, 0x45, 0x09, 0x5f, + 0x89, 0x55, 0x39, 0x3c, 0xfe, 0x12, 0x56, 0x63, 0xc7, 0x3b, 0x37, 0xf2, 0xae, 0x9f, 0x82, 0xc5, + 0x88, 0x53, 0x66, 0xbb, 0x03, 0x9d, 0x86, 0x72, 0x7c, 0xec, 0x0c, 0xd0, 0x05, 0x28, 0x0e, 0x1c, + 0xe1, 0x48, 0x37, 0xab, 0xdb, 0xa7, 0x93, 0x8c, 0xbf, 0xf2, 0x78, 0x22, 0x61, 0xf8, 0x16, 0xa0, + 0x58, 0xc5, 0xf3, 0xec, 0xef, 0x43, 0x89, 0xc7, 0x02, 0x5d, 0x37, 0x67, 0xb2, 0x2c, 0x53, 0x9e, + 0x10, 0x85, 0xc4, 0xbf, 0x98, 0x60, 0xed, 0x53, 0xc1, 0xdc, 0x3e, 0xbf, 0x19, 0xb2, 0x6c, 0xd9, + 0xf3, 0x37, 0xdd, 0x7e, 0x57, 0xa0, 0x96, 0x34, 0x96, 0xcd, 0xa9, 0xd0, 0x2d, 0x78, 0x72, 0x56, + 0x0b, 0x72, 0x52, 0x4d, 0xa0, 0x5d, 0x2a, 0x70, 0x07, 0x36, 0xe6, 0xfa, 0xac, 0x43, 0x71, 0x1e, + 0xca, 0xbe, 0x84, 0xe8, 0x58, 0xd4, 0x13, 0x5a, 0x65, 0x48, 0xb4, 0x16, 0xff, 0x6e, 0xc2, 0x89, + 0xa9, 0xb6, 0x8a, 0x9f, 0x70, 0x9f, 0x85, 0xbe, 0x9d, 0x2c, 0x8a, 0x49, 0xb6, 0xea, 0xb1, 0xbc, + 0xa3, 0xc5, 0x9d, 0x41, 0x36, 0x9d, 0x0b, 0xb9, 0x74, 0x5e, 0x83, 0xb2, 0x2c, 0xed, 0x64, 0xb0, + 0xac, 0xe4, 0x5e, 0x75, 0xe8, 0xb8, 0x6c, 0x67, 
0x4d, 0x4f, 0xfe, 0x9a, 0x14, 0xb5, 0x07, 0xce, + 0x48, 0x50, 0x46, 0xb4, 0x19, 0x7a, 0x17, 0xca, 0xfd, 0xd8, 0x19, 0xbe, 0x5e, 0x94, 0x04, 0xcb, + 0x09, 0x41, 0xb6, 0xf3, 0x35, 0x04, 0x7f, 0x6f, 0x42, 0x49, 0xb9, 0xfe, 0xa6, 0x72, 0xd5, 0x84, + 0x25, 0x1a, 0xf4, 0xc3, 0x81, 0x1b, 0x0c, 0x65, 0x8b, 0x94, 0x48, 0x7a, 0x46, 0x48, 0x97, 0x6e, + 0xdc, 0x0b, 0x35, 0x5d, 0x9f, 0xeb, 0xf0, 0xbf, 0x1e, 0x73, 0x02, 0x7e, 0x9f, 0x32, 0xe9, 0x58, + 0x9a, 0x18, 0xfc, 0x2d, 0xc0, 0x24, 0xde, 0x99, 0x38, 0x99, 0xff, 0x2e, 0x4e, 0x2d, 0x58, 0xe4, + 0x8e, 0x3f, 0xf2, 0x64, 0x87, 0xe7, 0x12, 0xdd, 0x95, 0x62, 0x1d, 0xa9, 0x04, 0x84, 0x2f, 0x43, + 0x25, 0xa5, 0x8e, 0x3d, 0x4f, 0x27, 0x62, 0x8d, 0xc8, 0xdf, 0x68, 0x0d, 0x4a, 0x72, 0xde, 0xc9, + 0x40, 0xd4, 0x88, 0x3a, 0xe0, 0x36, 0x94, 0x15, 0xdf, 0x44, 0xaf, 0x66, 0x8e, 0x3a, 0xc4, 0xb3, + 0x72, 0x46, 0x14, 0xab, 0x62, 0x12, 0x42, 0xdc, 0x86, 0xe5, 0x5c, 0xa9, 0xe6, 0xd6, 0x8f, 0xf9, + 0x9a, 0xeb, 0xa7, 0xac, 0xca, 0xf7, 0x3f, 0xc7, 0x0d, 0xdb, 0x50, 0xcb, 0x5e, 0x82, 0xce, 0x41, + 0x51, 0x3c, 0x1a, 0xa9, 0x57, 0xd5, 0x27, 0x74, 0x52, 0xdd, 0x7b, 0x34, 0xa2, 0x44, 0xaa, 0xd3, + 0x88, 0xa9, 0x6a, 0x9f, 0x8a, 0x58, 0x41, 0x0a, 0xd5, 0xe1, 0x9d, 0x4f, 0xa0, 0x92, 0x1a, 0xa3, + 0x0a, 0x94, 0x76, 0xef, 0xde, 0x6b, 0xef, 0x35, 0x0c, 0xb4, 0x0c, 0x95, 0x83, 0x3b, 0x3d, 0x5b, + 0x1d, 0x4d, 0x74, 0x02, 0xaa, 0x64, 0xf7, 0xd6, 0xee, 0xe7, 0xf6, 0x7e, 0xbb, 0x77, 0xfd, 0x76, + 0x63, 0x01, 0x21, 0xa8, 0x2b, 0xc1, 0xc1, 0x1d, 0x2d, 0x2b, 0x6c, 0xff, 0x54, 0x82, 0xa5, 0xa4, + 0xeb, 0xd0, 0x65, 0x28, 0x1e, 0x46, 0xfc, 0x18, 0xad, 0xcd, 0xfa, 0x02, 0x6a, 0x9e, 0x9c, 0x92, + 0xea, 0xaa, 0x33, 0xd0, 0x07, 0x50, 0x92, 0xfb, 0x16, 0xcd, 0xfc, 0x7c, 0x69, 0xce, 0xfe, 0x28, + 0xc1, 0x06, 0xba, 0x01, 0xd5, 0xcc, 0x9e, 0x9e, 0x63, 0x7d, 0x26, 0x27, 0xcd, 0xaf, 0x74, 0x6c, + 0x5c, 0x34, 0xd1, 0x6d, 0xa8, 0x66, 0x56, 0x2c, 0x6a, 0xe6, 0xd2, 0x95, 0x5b, 0xd6, 0x13, 0xae, + 0x19, 0x3b, 0x19, 0x1b, 0x68, 0x17, 0x60, 0xb2, 0x5d, 0xd1, 0xe9, 0x1c, 0x38, 0xbb, 0x86, 0x9b, + 0xcd, 0x59, 0xaa, 0x94, 0x66, 0x07, 0x2a, 0xe9, 0x6e, 0x41, 0xeb, 0x33, 0xd6, 0x8d, 0x22, 0x99, + 0xbf, 0x88, 0xb0, 0x81, 0x6e, 0x42, 0xad, 0xed, 0x79, 0xaf, 0x43, 0xd3, 0xcc, 0x6a, 0xf8, 0x34, + 0x8f, 0x07, 0xa7, 0xe6, 0x8c, 0x73, 0x74, 0x3e, 0x3f, 0xb6, 0xe7, 0xed, 0xa8, 0xe6, 0xdb, 0xff, + 0x88, 0x4b, 0x6f, 0xdb, 0x87, 0x7a, 0x7e, 0x34, 0xa1, 0x79, 0xdf, 0x57, 0x4d, 0x2b, 0x55, 0xcc, + 0x9e, 0x65, 0xc6, 0xa6, 0xb9, 0xf3, 0xd1, 0x93, 0xe7, 0x96, 0xf1, 0xf4, 0xb9, 0x65, 0xbc, 0x7c, + 0x6e, 0x99, 0xdf, 0x8d, 0x2d, 0xf3, 0xe7, 0xb1, 0x65, 0x3e, 0x1e, 0x5b, 0xe6, 0x93, 0xb1, 0x65, + 0xfe, 0x39, 0xb6, 0xcc, 0xbf, 0xc6, 0x96, 0xf1, 0x72, 0x6c, 0x99, 0x3f, 0xbc, 0xb0, 0x8c, 0x27, + 0x2f, 0x2c, 0xe3, 0xe9, 0x0b, 0xcb, 0xf8, 0xa2, 0xac, 0xfe, 0x7b, 0x1c, 0x95, 0xe5, 0xdf, 0x87, + 0x4b, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x95, 0x27, 0x3b, 0x4e, 0xb9, 0x0c, 0x00, 0x00, +} + func (x MatchType) String() string { s, ok := MatchType_name[int32(x)] if ok { @@ -1864,7 +1937,7 @@ func (this *TimeSeriesChunk) Equal(that interface{}) bool { return false } for i := range this.Labels { - if !this.Labels[i].Equal(&that1.Labels[i]) { + if !this.Labels[i].Equal(that1.Labels[i]) { return false } } @@ -1955,7 +2028,7 @@ func (this *TimeSeries) Equal(that interface{}) bool { return false } for i := range this.Labels { - if !this.Labels[i].Equal(&that1.Labels[i]) { + if !this.Labels[i].Equal(that1.Labels[i]) { return false } } @@ -1988,10 +2061,10 @@ func (this *LabelPair) Equal(that interface{}) bool { } else if this == nil { return false } - 
if !this.Name.Equal(that1.Name) { + if !bytes.Equal(this.Name, that1.Name) { return false } - if !this.Value.Equal(that1.Value) { + if !bytes.Equal(this.Value, that1.Value) { return false } return true @@ -2075,7 +2148,7 @@ func (this *Metric) Equal(that interface{}) bool { return false } for i := range this.Labels { - if !this.Labels[i].Equal(&that1.Labels[i]) { + if !this.Labels[i].Equal(that1.Labels[i]) { return false } } @@ -2321,13 +2394,7 @@ func (this *TimeSeriesChunk) GoString() string { s = append(s, "&client.TimeSeriesChunk{") s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Labels != nil { - vs := make([]*LabelPair, len(this.Labels)) - for i := range vs { - vs[i] = &this.Labels[i] - } - s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n") - } + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") if this.Chunks != nil { vs := make([]*Chunk, len(this.Chunks)) for i := range vs { @@ -2366,13 +2433,7 @@ func (this *TimeSeries) GoString() string { } s := make([]string, 0, 6) s = append(s, "&client.TimeSeries{") - if this.Labels != nil { - vs := make([]*LabelPair, len(this.Labels)) - for i := range vs { - vs[i] = &this.Labels[i] - } - s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n") - } + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") if this.Samples != nil { vs := make([]*Sample, len(this.Samples)) for i := range vs { @@ -2423,13 +2484,7 @@ func (this *Metric) GoString() string { } s := make([]string, 0, 5) s = append(s, "&client.Metric{") - if this.Labels != nil { - vs := make([]*LabelPair, len(this.Labels)) - for i := range vs { - vs[i] = &this.Labels[i] - } - s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n") - } + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3259,9 +3314,9 @@ func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size())) - n1, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + n1, err1 := m.Data.MarshalTo(dAtA[i:]) + if err1 != nil { + return 0, err1 } i += n1 } @@ -3536,22 +3591,18 @@ func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Name.Size())) - n2, err := m.Name.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - i += n2 - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Value.Size())) - n3, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - i += n3 return i, nil } @@ -4033,10 +4084,14 @@ func (m *LabelPair) Size() (n int) { } var l int _ = l - l = m.Name.Size() - n += 1 + l + sovCortex(uint64(l)) - l = m.Value.Size() - n += 1 + l + sovCortex(uint64(l)) + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } return n } @@ -4142,8 +4197,13 @@ func (this *ReadRequest) String() string { if this == nil { return "nil" } + repeatedStringForQueries := "[]*QueryRequest{" + for _, f := range this.Queries { + repeatedStringForQueries += strings.Replace(f.String(), 
"QueryRequest", "QueryRequest", 1) + "," + } + repeatedStringForQueries += "}" s := strings.Join([]string{`&ReadRequest{`, - `Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`, + `Queries:` + repeatedStringForQueries + `,`, `}`, }, "") return s @@ -4152,8 +4212,13 @@ func (this *ReadResponse) String() string { if this == nil { return "nil" } + repeatedStringForResults := "[]*QueryResponse{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(f.String(), "QueryResponse", "QueryResponse", 1) + "," + } + repeatedStringForResults += "}" s := strings.Join([]string{`&ReadResponse{`, - `Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`, + `Results:` + repeatedStringForResults + `,`, `}`, }, "") return s @@ -4162,10 +4227,15 @@ func (this *QueryRequest) String() string { if this == nil { return "nil" } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" s := strings.Join([]string{`&QueryRequest{`, `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, `}`, }, "") return s @@ -4174,8 +4244,13 @@ func (this *QueryResponse) String() string { if this == nil { return "nil" } + repeatedStringForTimeseries := "[]TimeSeries{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" s := strings.Join([]string{`&QueryResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`, + `Timeseries:` + repeatedStringForTimeseries + `,`, `}`, }, "") return s @@ -4184,8 +4259,13 @@ func (this *QueryStreamResponse) String() string { if this == nil { return "nil" } + repeatedStringForTimeseries := "[]TimeSeriesChunk{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" s := strings.Join([]string{`&QueryStreamResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`, + `Timeseries:` + repeatedStringForTimeseries + `,`, `}`, }, "") return s @@ -4257,7 +4337,7 @@ func (this *UserIDStatsResponse) String() string { } s := strings.Join([]string{`&UserIDStatsResponse{`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`, + `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, `}`, }, "") return s @@ -4266,8 +4346,13 @@ func (this *UsersStatsResponse) String() string { if this == nil { return "nil" } + repeatedStringForStats := "[]*UserIDStatsResponse{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," + } + repeatedStringForStats += "}" s := 
strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`, + `Stats:` + repeatedStringForStats + `,`, `}`, }, "") return s @@ -4276,10 +4361,15 @@ func (this *MetricsForLabelMatchersRequest) String() string { if this == nil { return "nil" } + repeatedStringForMatchersSet := "[]*LabelMatchers{" + for _, f := range this.MatchersSet { + repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + } + repeatedStringForMatchersSet += "}" s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`, + `MatchersSet:` + repeatedStringForMatchersSet + `,`, `}`, }, "") return s @@ -4288,8 +4378,13 @@ func (this *MetricsForLabelMatchersResponse) String() string { if this == nil { return "nil" } + repeatedStringForMetric := "[]*Metric{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + "," + } + repeatedStringForMetric += "}" s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`, + `Metric:` + repeatedStringForMetric + `,`, `}`, }, "") return s @@ -4298,11 +4393,16 @@ func (this *TimeSeriesChunk) String() string { if this == nil { return "nil" } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," + } + repeatedStringForChunks += "}" s := strings.Join([]string{`&TimeSeriesChunk{`, `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`, - `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, ``, 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, `}`, }, "") return s @@ -4333,9 +4433,14 @@ func (this *TimeSeries) String() string { if this == nil { return "nil" } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" s := strings.Join([]string{`&TimeSeries{`, - `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`, - `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + repeatedStringForSamples + `,`, `}`, }, "") return s @@ -4366,8 +4471,13 @@ func (this *LabelMatchers) String() string { if this == nil { return "nil" } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), 
"LabelMatcher", "LabelMatcher", 1) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, `}`, }, "") return s @@ -4377,7 +4487,7 @@ func (this *Metric) String() string { return "nil" } s := strings.Join([]string{`&Metric{`, - `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `}`, }, "") return s @@ -4417,7 +4527,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4445,7 +4555,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4454,6 +4564,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4476,7 +4589,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Source |= (WriteRequest_SourceEnum(b) & 0x7F) << shift + m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift if b < 0x80 { break } @@ -4490,6 +4603,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4517,7 +4633,7 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4540,6 +4656,9 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4567,7 +4686,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4595,7 +4714,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4604,6 +4723,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4621,6 +4743,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4648,7 +4773,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4676,7 +4801,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4685,6 +4810,9 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4702,6 +4830,9 @@ func (m *ReadResponse) Unmarshal(dAtA 
[]byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4729,7 +4860,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4757,7 +4888,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimestampMs |= (int64(b) & 0x7F) << shift + m.StartTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4776,7 +4907,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndTimestampMs |= (int64(b) & 0x7F) << shift + m.EndTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4795,7 +4926,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4804,6 +4935,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4821,6 +4955,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4848,7 +4985,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4876,7 +5013,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4885,6 +5022,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4902,6 +5042,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4929,7 +5072,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4957,7 +5100,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4966,6 +5109,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4983,6 +5129,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5010,7 +5159,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5038,7 +5187,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5048,6 +5197,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5062,6 +5214,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5089,7 +5244,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5117,7 +5272,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5127,6 +5282,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5141,6 +5299,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5168,7 +5329,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5191,6 +5352,9 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5218,7 +5382,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5246,7 +5410,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5256,6 +5420,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5270,6 +5437,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5297,7 +5467,7 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5320,6 +5490,9 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5347,7 +5520,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5386,7 +5559,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumSeries |= (uint64(b) & 0x7F) << shift + m.NumSeries |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5422,6 +5595,9 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5449,7 +5625,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5477,7 +5653,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5487,6 +5663,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5506,7 +5685,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5515,6 +5694,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5534,6 +5716,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5561,7 +5746,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5589,7 +5774,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5598,6 +5783,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5615,6 +5803,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5642,7 +5833,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5670,7 +5861,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimestampMs |= (int64(b) & 0x7F) << shift + m.StartTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5689,7 +5880,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndTimestampMs |= (int64(b) & 0x7F) << shift + m.EndTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5708,7 +5899,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5717,6 +5908,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5734,6 +5928,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5761,7 +5958,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5789,7 +5986,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5798,6 +5995,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5815,6 +6015,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5842,7 +6045,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5870,7 +6073,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5880,6 +6083,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5899,7 +6105,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5909,6 +6115,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5928,7 +6137,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5937,10 +6146,13 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, LabelPair{}) + m.Labels = append(m.Labels, LabelAdapter{}) if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5959,7 +6171,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } 
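// Editor's sketch: the regenerated Unmarshal hunks in this file all repeat the
// same two mechanical changes: the varint accumulator now masks the byte before
// shifting (`uint64(b&0x7F) << shift` rather than `(uint64(b) & 0x7F) << shift`),
// and every computed offset gains a negative-overflow guard. A minimal sketch of
// that decode loop follows; readVarint is a hypothetical helper used only to make
// the pattern explicit, it is not part of the generated file.
func readVarint(dAtA []byte) (v uint64, iNdEx int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// a well-formed 64-bit varint never spans more than ten bytes
			return 0, 0, ErrIntOverflowCortex
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift // keep the 7 payload bits, shift into place
		if b < 0x80 {                // a clear continuation bit ends the varint
			return v, iNdEx, nil
		}
	}
}
// When the decoded value is a length, `postIndex := iNdEx + msglen` can wrap
// negative on hostile input, which is exactly what the new `if postIndex < 0`
// checks reject with ErrInvalidLengthCortex.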
@@ -5968,6 +6180,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5985,6 +6200,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6012,7 +6230,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6040,7 +6258,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimestampMs |= (int64(b) & 0x7F) << shift + m.StartTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6059,7 +6277,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndTimestampMs |= (int64(b) & 0x7F) << shift + m.EndTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6078,7 +6296,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Encoding |= (int32(b) & 0x7F) << shift + m.Encoding |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6097,7 +6315,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6106,6 +6324,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6123,6 +6344,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6150,7 +6374,7 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6173,6 +6397,9 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6200,7 +6427,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6228,7 +6455,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6237,10 +6464,13 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, LabelPair{}) + m.Labels = append(m.Labels, LabelAdapter{}) if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6259,7 +6489,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6268,6 +6498,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6285,6 +6518,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6312,7 +6548,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6340,7 +6576,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6349,11 +6585,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Name.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} } iNdEx = postIndex case 2: @@ -6370,7 +6610,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6379,11 +6619,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} } iNdEx = postIndex default: @@ -6395,6 +6639,9 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6422,7 +6669,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6461,7 +6708,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimestampMs |= (int64(b) & 0x7F) << shift + m.TimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6475,6 +6722,9 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6502,7 +6752,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6530,7 +6780,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6539,6 +6789,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6556,6 +6809,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -6583,7 +6839,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6611,7 +6867,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6620,10 +6876,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, LabelPair{}) + m.Labels = append(m.Labels, LabelAdapter{}) if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6637,6 +6896,9 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6664,7 +6926,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6692,7 +6954,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (MatchType(b) & 0x7F) << shift + m.Type |= MatchType(b&0x7F) << shift if b < 0x80 { break } @@ -6711,7 +6973,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6721,6 +6983,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6740,7 +7005,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6750,6 +7015,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { return ErrInvalidLengthCortex } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6764,6 +7032,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthCortex } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6830,10 +7101,13 @@ func skipCortex(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthCortex } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } return iNdEx, nil case 3: for { @@ -6862,6 +7136,9 @@ func skipCortex(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } } return iNdEx, nil case 4: @@ -6880,89 +7157,3 @@ var ( ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_cortex_dc30309a17c87a98) -} - -var fileDescriptor_cortex_dc30309a17c87a98 = []byte{ - // 1247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xdf, 0x8d, 0xff, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0xd2, 0xd2, 0xd4, 0x15, 0xeb, 0x32, 0x52, - 0x4b, 0x04, 0xd4, 0x2e, 0xa9, 0x0a, 0x45, 0x50, 0x81, 0xd3, 0xba, 0xad, 0x51, 0x92, 0xa6, 0x6b, - 0x17, 0x10, 0x12, 0x5a, 0x6d, 0xec, 0xa9, 0xb3, 0x74, 0xff, 0xb8, 0x33, 0xb3, 0x40, 0x6e, 0x7c, - 0x03, 0x38, 0xc2, 0x37, 0xe0, 0x86, 0xc4, 0x05, 0x3e, 0x42, 0x8f, 0x3d, 0x56, 0x1c, 0x2a, 0xea, - 0x5e, 0x38, 0xf6, 0x23, 0xa0, 0x9d, 0x99, 0x5d, 0xef, 0xba, 0xb6, 0x08, 0x42, 0xbd, 0x79, 0xde, - 0xfb, 0xbd, 0xdf, 0xbe, 0xbf, 0xf3, 0xc6, 0xf0, 0xc9, 0xc8, 0xe1, 0x87, 0xe1, 0x41, 0x73, 0x10, - 0x78, 0xad, 0x41, 0x40, 0x39, 0xf9, 0x6e, 0x4c, 0x83, 0xaf, 0xc9, 0x80, 0xab, 0x53, 0x6b, 0xfc, - 0x60, 0xd4, 0x72, 0xfc, 0x11, 0x61, 0x9c, 0xd0, 0xd6, 0xc0, 0x75, 0x88, 0x1f, 0xab, 0x9a, 0x63, - 0x1a, 0xf0, 0x00, 0x15, 0xe5, 0xa9, 0x7e, 0x31, 0xc5, 0x34, 0x0a, 0x46, 0x41, 0x4b, 0xa8, 0x0f, - 0xc2, 0xfb, 0xe2, 0x24, 0x0e, 0xe2, 0x97, 0x34, 0xc3, 0x7f, 0xe8, 0x50, 0xf9, 0x9c, 0x3a, 0x9c, - 0x98, 0xe4, 0x61, 0x48, 0x18, 0x47, 0x7b, 0x00, 0xdc, 0xf1, 0x08, 0x23, 0xd4, 0x21, 0x6c, 0x43, - 0x3f, 0x97, 0xdb, 0x2c, 0x6f, 0xa1, 0xa6, 0xfa, 0x54, 0xdf, 0xf1, 0x48, 0x4f, 0x68, 0xb6, 0xeb, - 0x8f, 0x9e, 0x36, 0xb4, 0x3f, 0x9f, 0x36, 0xd0, 0x3e, 0x25, 0xb6, 0xeb, 0x06, 0x83, 0x7e, 0x62, - 0x65, 0xa6, 0x18, 0xd0, 0xfb, 0x50, 0xec, 0x05, 0x21, 0x1d, 0x90, 0x8d, 0xa5, 0x73, 0xfa, 0x66, - 0x75, 0xab, 0x11, 0x73, 0xa5, 0xbf, 0xda, 0x94, 0x90, 0x8e, 0x1f, 0x7a, 0x66, 0x91, 0x89, 0xdf, - 0xb8, 0x01, 0x30, 0x95, 0xa2, 0x65, 0xc8, 0xb5, 0xf7, 0xbb, 0x35, 0x0d, 0xad, 0x40, 0xde, 0xbc, - 0xb7, 0xd3, 0xa9, 0xe9, 0xf8, 0x04, 0xac, 0x2a, 0x0e, 0x36, 0x0e, 0x7c, 0x46, 0xf0, 0x35, 0x28, - 0x9b, 0xc4, 0x1e, 0xc6, 0x91, 0x34, 0x61, 0xf9, 0x61, 0x98, 0x0e, 0xe3, 0x64, 0xfc, 0xe9, 0xbb, - 0x21, 0xa1, 0x47, 0x0a, 0x66, 0xc6, 0x20, 0xfc, 0x31, 0x54, 0xa4, 0xb9, 0xa4, 0x43, 0x2d, 0x58, - 0xa6, 0x84, 0x85, 0x2e, 0x8f, 0xed, 0x4f, 0xcd, 0xd8, 0x4b, 0x9c, 0x19, 0xa3, 0xf0, 0x4f, 0x3a, - 0x54, 0xd2, 0xd4, 0xe8, 0x1d, 0x40, 0x8c, 0xdb, 0x94, 0x5b, 0x22, 0x1f, 0xdc, 0xf6, 0xc6, 0x96, - 0x17, 0x91, 0xe9, 0x9b, 0x39, 0xb3, 0x26, 0x34, 0xfd, 0x58, 0xb1, 0xcb, 0xd0, 0x26, 0xd4, 0x88, - 0x3f, 0xcc, 0x62, 0x97, 0x04, 0xb6, 0x4a, 0xfc, 0x61, 0x1a, 0x79, 0x09, 0x56, 0x3c, 0x9b, 0x0f, - 0x0e, 0x09, 0x65, 0x1b, 0xb9, 0x6c, 0x68, 0x3b, 0xf6, 0x01, 0x71, 0x77, 0xa5, 0xd2, 0x4c, 0x50, - 0xb8, 0x0b, 0xab, 0x19, 0xa7, 0xd1, 0xd5, 0x63, 0x96, 0x39, 0x1f, 0x95, 0x39, 0x5d, 0x50, 0xdc, - 0x87, 0x75, 0x41, 0xd5, 0xe3, 0x94, 0xd8, 0x5e, 0x42, 0x78, 0x6d, 0x0e, 0xe1, 0xe9, 0x97, 0x09, - 0xaf, 0x1f, 0x86, 0xfe, 0x83, 0x39, 0xac, 0x97, 0x01, 0x09, 0xd7, 0x3f, 0xb3, 0xdd, 0x90, 0xb0, - 0x38, 0x81, 0xaf, 0x03, 0xb8, 0x91, 0xd4, 0xf2, 0x6d, 0x8f, 0x88, 0xc4, 0x95, 0xcc, 0x92, 0x90, - 0xec, 0xd9, 0x1e, 0xc1, 0x57, 0x61, 0x3d, 0x63, 0xa4, 0x5c, 0x79, 0x03, 0x2a, 0xd2, 0xea, 0x1b, - 0x21, 0x17, 0xce, 0x94, 0xcc, 0xb2, 0x3b, 0x85, 0xe2, 0x75, 0x58, 0xdb, 0x89, 0x69, 0xe2, 0xaf, - 0xe1, 0x2b, 0xca, 0x07, 0x25, 0x54, 0x6c, 0x0d, 0x28, 0x4f, 0x7d, 0x88, 0xc9, 0x20, 0x71, 0x82, - 0x61, 0x04, 0xb5, 0x7b, 0x8c, 0xd0, 0x1e, 0xb7, 0x79, 0x42, 0xf5, 0xbb, 0x0e, 0x6b, 0x29, 0xa1, - 0xa2, 0x3a, 0x0f, 0x55, 0x39, 0xc3, 0x4e, 0xe0, 0x5b, 0xd4, 0xe6, 0x32, 0x24, 0xdd, 0x5c, 0x4d, - 0xa4, 0xa6, 0xcd, 0x49, 0x14, 0xb5, 0x1f, 0x7a, 0x96, 0x4a, 0x65, 0xd4, 0x02, 0x79, 0xb3, 0xe4, - 0x87, 0x9e, 0xcc, 0x60, 0xd4, 0x55, 0xf6, 0xd8, 0xb1, 0x66, 0x98, 0x72, 0x82, 0xa9, 0x66, 0x8f, - 0x9d, 0x6e, 0x86, 0xac, 0x09, 0xeb, 0x34, 0x74, 0xc9, 
0x2c, 0x3c, 0x2f, 0xe0, 0x6b, 0x91, 0x2a, - 0x83, 0xc7, 0x5f, 0xc1, 0x7a, 0xe4, 0x78, 0xf7, 0x46, 0xd6, 0xf5, 0xd3, 0xb0, 0x1c, 0x32, 0x42, - 0x2d, 0x67, 0xa8, 0xca, 0x50, 0x8c, 0x8e, 0xdd, 0x21, 0xba, 0x08, 0xf9, 0xa1, 0xcd, 0x6d, 0xe1, - 0x66, 0x79, 0xeb, 0x4c, 0x5c, 0xf1, 0x97, 0x82, 0x37, 0x05, 0x0c, 0xdf, 0x02, 0x14, 0xa9, 0x58, - 0x96, 0xfd, 0x5d, 0x28, 0xb0, 0x48, 0xa0, 0xfa, 0xe6, 0x6c, 0x9a, 0x65, 0xc6, 0x13, 0x53, 0x22, - 0xf1, 0x6f, 0x3a, 0x18, 0xbb, 0x84, 0x53, 0x67, 0xc0, 0x6e, 0x06, 0x34, 0xdd, 0xf6, 0xec, 0x55, - 0x8f, 0xdf, 0x55, 0xa8, 0xc4, 0x83, 0x65, 0x31, 0xc2, 0xd5, 0x08, 0x9e, 0x9a, 0x37, 0x82, 0xcc, - 0x2c, 0xc7, 0xd0, 0x1e, 0xe1, 0xb8, 0x0b, 0x8d, 0x85, 0x3e, 0xab, 0x54, 0x5c, 0x80, 0xa2, 0x27, - 0x20, 0x2a, 0x17, 0xd5, 0x98, 0x56, 0x1a, 0x9a, 0x4a, 0x1b, 0xc5, 0x7f, 0x62, 0x66, 0xac, 0xa2, - 0x10, 0xee, 0xd3, 0xc0, 0xb3, 0xe2, 0x45, 0x31, 0xad, 0x56, 0x35, 0x92, 0x77, 0x95, 0xb8, 0x3b, - 0x4c, 0x97, 0x73, 0x29, 0x53, 0xce, 0x16, 0x14, 0x45, 0x6b, 0xc7, 0x17, 0xcb, 0x5a, 0x26, 0xaa, - 0x7d, 0xdb, 0xa1, 0x6a, 0x78, 0x15, 0x0c, 0xbd, 0x0d, 0xc5, 0x41, 0xf4, 0x71, 0xb6, 0x91, 0x17, - 0x06, 0xab, 0xb1, 0x41, 0x7a, 0xd2, 0x15, 0x04, 0xff, 0xa0, 0x43, 0x41, 0xba, 0xfa, 0xaa, 0x6a, - 0x53, 0x87, 0x15, 0xe2, 0x0f, 0x82, 0xa1, 0xe3, 0x8f, 0xc4, 0x48, 0x14, 0xcc, 0xe4, 0x8c, 0x90, - 0x6a, 0xd5, 0xa8, 0xf7, 0x2b, 0xaa, 0x1f, 0x37, 0xe0, 0xb5, 0x3e, 0xb5, 0x7d, 0x76, 0x9f, 0x50, - 0xe1, 0x58, 0x52, 0x08, 0xec, 0x01, 0x4c, 0xf3, 0x9b, 0xca, 0x8b, 0x7e, 0xbc, 0xbc, 0x34, 0x61, - 0x99, 0xd9, 0xde, 0xd8, 0x15, 0x13, 0x9c, 0x29, 0x64, 0x4f, 0x88, 0x15, 0x3c, 0x06, 0xe1, 0x5f, - 0x75, 0x28, 0x25, 0x5c, 0xe8, 0x0e, 0xe4, 0x93, 0x2b, 0xaf, 0xb2, 0xfd, 0xa1, 0xda, 0xb5, 0x97, - 0x8f, 0xf3, 0x4a, 0x08, 0xb9, 0xe3, 0xb6, 0xbe, 0x75, 0x28, 0x69, 0x6e, 0x1f, 0x71, 0xc2, 0x4c, - 0x41, 0x84, 0xee, 0x42, 0x41, 0xdc, 0x86, 0x22, 0x6d, 0xff, 0x93, 0x51, 0x32, 0xe1, 0x36, 0x14, - 0x65, 0x28, 0xe8, 0x64, 0x4c, 0x2e, 0xaf, 0x33, 0x79, 0x88, 0xae, 0xe1, 0x39, 0x05, 0x2b, 0xf3, - 0x69, 0xb5, 0x70, 0x1b, 0x56, 0x33, 0x53, 0x90, 0xd9, 0x6c, 0xfa, 0xb1, 0x36, 0xdb, 0x07, 0x50, - 0x94, 0x93, 0xf1, 0x9f, 0x4b, 0x84, 0x2d, 0xa8, 0xa4, 0x49, 0xd1, 0x79, 0xc8, 0xf3, 0xa3, 0xb1, - 0x8c, 0xa2, 0x3a, 0x35, 0x17, 0xea, 0xfe, 0xd1, 0x98, 0x98, 0x42, 0x1d, 0xb5, 0x91, 0xa8, 0x8d, - 0x1c, 0x1c, 0x99, 0xde, 0x24, 0x03, 0x39, 0x21, 0x94, 0x87, 0xb7, 0x3e, 0x85, 0x52, 0x62, 0x8c, - 0x4a, 0x50, 0xe8, 0xdc, 0xbd, 0xd7, 0xde, 0xa9, 0x69, 0x68, 0x15, 0x4a, 0x7b, 0x77, 0xfa, 0x96, - 0x3c, 0xea, 0xe8, 0x04, 0x94, 0xcd, 0xce, 0xad, 0xce, 0x17, 0xd6, 0x6e, 0xbb, 0x7f, 0xfd, 0x76, - 0x6d, 0x09, 0x21, 0xa8, 0x4a, 0xc1, 0xde, 0x1d, 0x25, 0xcb, 0x6d, 0xfd, 0x5c, 0x80, 0x95, 0x78, - 0x80, 0xd1, 0x15, 0xc8, 0xef, 0x87, 0xec, 0x10, 0x9d, 0x9c, 0xf7, 0x98, 0xaa, 0x9f, 0x9a, 0x91, - 0xaa, 0x86, 0xd6, 0xd0, 0x7b, 0x50, 0x10, 0xab, 0x1b, 0xcd, 0x7d, 0x09, 0xd5, 0xe7, 0xbf, 0x6f, - 0xb0, 0x86, 0x6e, 0x40, 0x39, 0xb5, 0xf2, 0x17, 0x58, 0x9f, 0xcd, 0x48, 0xb3, 0xaf, 0x03, 0xac, - 0x5d, 0xd2, 0xd1, 0x6d, 0x28, 0xa7, 0xb6, 0x35, 0xaa, 0x67, 0xca, 0x93, 0xd9, 0xfb, 0x53, 0xae, - 0x39, 0xeb, 0x1d, 0x6b, 0xa8, 0x03, 0x30, 0x5d, 0xd4, 0xe8, 0x4c, 0x06, 0x9c, 0xde, 0xe8, 0xf5, - 0xfa, 0x3c, 0x55, 0x42, 0xb3, 0x0d, 0xa5, 0x64, 0x4d, 0xa1, 0x8d, 0x39, 0x9b, 0x4b, 0x92, 0x2c, - 0xde, 0x69, 0x58, 0x43, 0x37, 0xa1, 0xd2, 0x76, 0xdd, 0xe3, 0xd0, 0xd4, 0xd3, 0x1a, 0x36, 0xcb, - 0xe3, 0xc2, 0xe9, 0x05, 0x9b, 0x01, 0x5d, 0xc8, 0x6e, 0x80, 0x45, 0xeb, 0xae, 0xfe, 0xe6, 0xbf, - 0xe2, 0x92, 0xaf, 0xed, 0x42, 0x35, 0x7b, 0xeb, 0xa1, 0x45, 0x4f, 0xb5, 0xba, 
0x91, 0x28, 0xe6, - 0x5f, 0x93, 0xda, 0xa6, 0xbe, 0xfd, 0xd1, 0xe3, 0x67, 0x86, 0xf6, 0xe4, 0x99, 0xa1, 0xbd, 0x78, - 0x66, 0xe8, 0xdf, 0x4f, 0x0c, 0xfd, 0x97, 0x89, 0xa1, 0x3f, 0x9a, 0x18, 0xfa, 0xe3, 0x89, 0xa1, - 0xff, 0x35, 0x31, 0xf4, 0xbf, 0x27, 0x86, 0xf6, 0x62, 0x62, 0xe8, 0x3f, 0x3e, 0x37, 0xb4, 0xc7, - 0xcf, 0x0d, 0xed, 0xc9, 0x73, 0x43, 0xfb, 0xb2, 0x28, 0xff, 0xc6, 0x1c, 0x14, 0xc5, 0x3f, 0x91, - 0xcb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x33, 0xbf, 0x53, 0xf9, 0x04, 0x0d, 0x00, 0x00, -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto index 02e17e35b8a1..e1659919920f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto @@ -104,7 +104,7 @@ message MetricsForLabelMatchersResponse { message TimeSeriesChunk { string from_ingester_id = 1; string user_id = 2; - repeated LabelPair labels = 3 [(gogoproto.nullable) = false]; + repeated LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; repeated Chunk chunks = 4 [(gogoproto.nullable) = false]; } @@ -119,14 +119,14 @@ message TransferChunksResponse { } message TimeSeries { - repeated LabelPair labels = 1 [(gogoproto.nullable) = false]; + repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; // Sorted by time, oldest sample first. repeated Sample samples = 2 [(gogoproto.nullable) = false]; } message LabelPair { - bytes name = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false]; - bytes value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false]; + bytes name = 1; + bytes value = 2; } message Sample { @@ -139,7 +139,7 @@ message LabelMatchers { } message Metric { - repeated LabelPair labels = 1 [(gogoproto.nullable) = false]; + repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; } enum MatchType { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index 41453b10aaa9..c41508595060 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -19,6 +19,8 @@ package client const ( offset64 = 14695981039346656037 prime64 = 1099511628211 + offset32 = 2166136261 + prime32 = 16777619 ) // hashNew initializies a new fnv64a hash value. @@ -27,7 +29,8 @@ func hashNew() uint64 { } // hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s []byte) uint64 { +// Note this is the same algorithm as Go stdlib `sum64a.Write()` +func hashAdd(h uint64, s string) uint64 { for i := 0; i < len(s); i++ { h ^= uint64(s[i]) h *= prime64 @@ -41,3 +44,18 @@ func hashAddByte(h uint64, b byte) uint64 { h *= prime64 return h } + +// HashNew32 initializes a new fnv32 hash value. +func HashNew32() uint32 { + return offset32 +} + +// HashAdd32 adds a string to a fnv32 hash value, returning the updated hash.
+// Note this is the same algorithm as Go stdlib `sum32.Write()` +func HashAdd32(h uint32, s string) uint32 { + for i := 0; i < len(s); i++ { + h *= prime32 + h ^= uint32(s[i]) + } + return h +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go index b5b46b9ab878..8c7cc40588c8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go @@ -1,6 +1,14 @@ package client -import "flag" +import ( + "flag" + "fmt" + "io" + "strings" + "unsafe" + + "github.com/prometheus/prometheus/pkg/labels" +) var ( expectedTimeseries = 100 @@ -37,7 +45,182 @@ type PreallocTimeseries struct { // Unmarshal implements proto.Message. func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { - p.Labels = make([]LabelPair, 0, expectedLabels) + p.Labels = make([]LabelAdapter, 0, expectedLabels) p.Samples = make([]Sample, 0, expectedSamplesPerSeries) return p.TimeSeries.Unmarshal(dAtA) } + +// LabelAdapter is a labels.Label that can be marshalled to/from protos. +type LabelAdapter labels.Label + +// Marshal implements proto.Marshaller. +func (bs *LabelAdapter) Marshal() ([]byte, error) { + buf := make([]byte, bs.Size()) + _, err := bs.MarshalTo(buf) + return buf, err +} + +// MarshalTo implements proto.Marshaller. +func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { + var i int + ls := (*labels.Label)(bs) + + buf[i] = 0xa + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) + i += copy(buf[i:], ls.Name) + + buf[i] = 0x12 + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) + i += copy(buf[i:], ls.Value) + + return i, nil } + +// Unmarshal a LabelAdapter, implements proto.Unmarshaller. +// NB this is a copy of the autogenerated code to unmarshal a LabelPair, +// with the byte copying replaced with a yoloString.
+func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Value = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func yoloString(buf []byte) string { + return *((*string)(unsafe.Pointer(&buf))) +} + +// Size implements proto.Sizer. +func (bs *LabelAdapter) Size() int { + ls := (*labels.Label)(bs) + var n int + l := len(ls.Name) + n += 1 + l + sovCortex(uint64(l)) + l = len(ls.Value) + n += 1 + l + sovCortex(uint64(l)) + return n +} + +// Equal implements proto.Equaler. +func (bs *LabelAdapter) Equal(other LabelAdapter) bool { + return bs.Name == other.Name && bs.Value == other.Value +} + +// Compare implements proto.Comparer. 
+func (bs *LabelAdapter) Compare(other LabelAdapter) int { + if c := strings.Compare(bs.Name, other.Name); c != 0 { + return c + } + return strings.Compare(bs.Value, other.Value) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go index 860804df0bd7..6e072e70335e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go @@ -24,7 +24,7 @@ type InvertedIndex struct { func New() *InvertedIndex { shards := make([]indexShard, indexShards) for i := 0; i < indexShards; i++ { - shards[i].idx = map[model.LabelName]map[model.LabelValue][]model.Fingerprint{} + shards[i].idx = map[string]indexEntry{} } return &InvertedIndex{ shards: shards, @@ -32,9 +32,9 @@ func New() *InvertedIndex { } // Add a fingerprint under the specified labels. -func (ii *InvertedIndex) Add(labels []client.LabelPair, fp model.Fingerprint) { +func (ii *InvertedIndex) Add(labels []client.LabelAdapter, fp model.Fingerprint) labels.Labels { shard := &ii.shards[util.HashFP(fp)%indexShards] - shard.add(labels, fp) + return shard.add(labels, fp) } // Lookup all fingerprints for the provided matchers. @@ -49,32 +49,31 @@ func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher) []model.Fingerprint result = append(result, fps...) } - sort.Sort(fingerprints(result)) return result } // LabelNames returns all label names. -func (ii *InvertedIndex) LabelNames() model.LabelNames { - results := make([]model.LabelNames, 0, indexShards) +func (ii *InvertedIndex) LabelNames() []string { + results := make([][]string, 0, indexShards) for i := range ii.shards { shardResult := ii.shards[i].labelNames() results = append(results, shardResult) } - return mergeLabelNameLists(results) + return mergeStringSlices(results) } // LabelValues returns the values for the given label. -func (ii *InvertedIndex) LabelValues(name model.LabelName) model.LabelValues { - results := make([]model.LabelValues, 0, indexShards) +func (ii *InvertedIndex) LabelValues(name string) []string { + results := make([][]string, 0, indexShards) for i := range ii.shards { shardResult := ii.shards[i].labelValues(name) results = append(results, shardResult) } - return mergeLabelValueLists(results) + return mergeStringSlices(results) } // Delete a fingerprint with the given label pairs. @@ -84,7 +83,17 @@ func (ii *InvertedIndex) Delete(labels labels.Labels, fp model.Fingerprint) { } // NB slice entries are sorted in fp order. -type unlockIndex map[model.LabelName]map[model.LabelValue][]model.Fingerprint +type indexEntry struct { + name string + fps map[string]indexValueEntry +} + +type indexValueEntry struct { + value string + fps []model.Fingerprint +} + +type unlockIndex map[string]indexEntry // This is the prevalent value for Intel and AMD CPUs as-at 2018. 
const cacheLineSize = 64 @@ -95,27 +104,44 @@ type indexShard struct { pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(unlockIndex{})]byte } -func (shard *indexShard) add(metric []client.LabelPair, fp model.Fingerprint) { +func copyString(s string) string { + return string([]byte(s)) +} + +// add metric to the index; return all the name/value pairs as strings from the index, sorted +func (shard *indexShard) add(metric []client.LabelAdapter, fp model.Fingerprint) labels.Labels { shard.mtx.Lock() defer shard.mtx.Unlock() - for _, pair := range metric { - value := model.LabelValue(pair.Value) - values, ok := shard.idx[model.LabelName(pair.Name)] + internedLabels := make(labels.Labels, len(metric)) + + for i, pair := range metric { + values, ok := shard.idx[pair.Name] if !ok { - values = map[model.LabelValue][]model.Fingerprint{} - shard.idx[model.LabelName(pair.Name)] = values + values = indexEntry{ + name: copyString(pair.Name), + fps: map[string]indexValueEntry{}, + } + shard.idx[values.name] = values + } + fingerprints, ok := values.fps[pair.Value] + if !ok { + fingerprints = indexValueEntry{ + value: copyString(pair.Value), + } } - fingerprints := values[value] // Insert into the right position to keep fingerprints sorted - j := sort.Search(len(fingerprints), func(i int) bool { - return fingerprints[i] >= fp + j := sort.Search(len(fingerprints.fps), func(i int) bool { + return fingerprints.fps[i] >= fp }) - fingerprints = append(fingerprints, 0) - copy(fingerprints[j+1:], fingerprints[j:]) - fingerprints[j] = fp - values[value] = fingerprints + fingerprints.fps = append(fingerprints.fps, 0) + copy(fingerprints.fps[j+1:], fingerprints.fps[j:]) + fingerprints.fps[j] = fp + values.fps[fingerprints.value] = fingerprints + internedLabels[i] = labels.Label{Name: values.name, Value: fingerprints.value} } + sort.Sort(internedLabels) + return internedLabels } func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint { @@ -129,20 +155,20 @@ func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint // loop invariant: result is sorted var result []model.Fingerprint for _, matcher := range matchers { - values, ok := shard.idx[model.LabelName(matcher.Name)] + values, ok := shard.idx[matcher.Name] if !ok { return nil } var toIntersect model.Fingerprints if matcher.Type == labels.MatchEqual { - fps := values[model.LabelValue(matcher.Value)] - toIntersect = append(toIntersect, fps...) // deliberate copy + fps := values.fps[matcher.Value] + toIntersect = append(toIntersect, fps.fps...) // deliberate copy } else { // accumulate the matching fingerprints (which are all distinct) // then sort to maintain the invariant - for value, fps := range values { - if matcher.Matches(string(value)) { - toIntersect = append(toIntersect, fps...) + for value, fps := range values.fps { + if matcher.Matches(value) { + toIntersect = append(toIntersect, fps.fps...) 
} } sort.Sort(toIntersect) @@ -156,20 +182,20 @@ func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint return result } -func (shard *indexShard) labelNames() model.LabelNames { +func (shard *indexShard) labelNames() []string { shard.mtx.RLock() defer shard.mtx.RUnlock() - results := make(model.LabelNames, 0, len(shard.idx)) + results := make([]string, 0, len(shard.idx)) for name := range shard.idx { results = append(results, name) } - sort.Sort(labelNames(results)) + sort.Strings(results) return results } -func (shard *indexShard) labelValues(name model.LabelName) model.LabelValues { +func (shard *indexShard) labelValues(name string) []string { shard.mtx.RLock() defer shard.mtx.RUnlock() @@ -178,12 +204,12 @@ func (shard *indexShard) labelValues(name model.LabelName) model.LabelValues { return nil } - results := make(model.LabelValues, 0, len(values)) - for val := range values { + results := make([]string, 0, len(values.fps)) + for val := range values.fps { results = append(results, val) } - sort.Sort(labelValues(results)) + sort.Strings(results) return results } @@ -192,28 +218,28 @@ func (shard *indexShard) delete(labels labels.Labels, fp model.Fingerprint) { defer shard.mtx.Unlock() for _, pair := range labels { - name, value := model.LabelName(pair.Name), model.LabelValue(pair.Value) + name, value := pair.Name, pair.Value values, ok := shard.idx[name] if !ok { continue } - fingerprints, ok := values[value] + fingerprints, ok := values.fps[value] if !ok { continue } - j := sort.Search(len(fingerprints), func(i int) bool { - return fingerprints[i] >= fp + j := sort.Search(len(fingerprints.fps), func(i int) bool { + return fingerprints.fps[i] >= fp }) - fingerprints = fingerprints[:j+copy(fingerprints[j:], fingerprints[j+1:])] + fingerprints.fps = fingerprints.fps[:j+copy(fingerprints.fps[j:], fingerprints.fps[j+1:])] - if len(fingerprints) == 0 { - delete(values, value) + if len(fingerprints.fps) == 0 { + delete(values.fps, value) } else { - values[value] = fingerprints + values.fps[value] = fingerprints } - if len(values) == 0 { + if len(values.fps) == 0 { delete(shard.idx, name) } else { shard.idx[name] = values @@ -241,42 +267,31 @@ func intersect(a, b []model.Fingerprint) []model.Fingerprint { return result } -type labelValues model.LabelValues - -func (a labelValues) Len() int { return len(a) } -func (a labelValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a labelValues) Less(i, j int) bool { return a[i] < a[j] } - -type labelNames model.LabelNames - -func (a labelNames) Len() int { return len(a) } -func (a labelNames) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a labelNames) Less(i, j int) bool { return a[i] < a[j] } - type fingerprints []model.Fingerprint func (a fingerprints) Len() int { return len(a) } func (a fingerprints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a fingerprints) Less(i, j int) bool { return a[i] < a[j] } -func mergeLabelValueLists(lvss []model.LabelValues) model.LabelValues { - switch len(lvss) { +func mergeStringSlices(ss [][]string) []string { + switch len(ss) { case 0: return nil case 1: - return lvss[0] + return ss[0] case 2: - return mergeTwoLabelValueLists(lvss[0], lvss[1]) + return mergeTwoStringSlices(ss[0], ss[1]) default: - n := len(lvss) / 2 - left := mergeLabelValueLists(lvss[:n]) - right := mergeLabelValueLists(lvss[n:]) - return mergeTwoLabelValueLists(left, right) + halfway := len(ss) / 2 + return mergeTwoStringSlices( + mergeStringSlices(ss[:halfway]), + mergeStringSlices(ss[halfway:]), + ) } } 
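[Editor's note] The new mergeStringSlices above recursively reduces the per-shard result lists with the two-way merge whose diff follows. As a standalone sketch (illustrative only, not the vendored code; mergeTwo is a hypothetical stand-in for mergeTwoStringSlices), this is how sorted per-shard label names merge into one sorted, deduplicated list:

```go
package main

import "fmt"

// mergeTwo merges two sorted, deduplicated string slices,
// keeping a single copy of entries present in both inputs.
func mergeTwo(a, b []string) []string {
	result := make([]string, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			result = append(result, a[i])
			i++
		case a[i] > b[j]:
			result = append(result, b[j])
			j++
		default: // equal in both inputs: keep one copy
			result = append(result, a[i])
			i++
			j++
		}
	}
	// Append whatever remains of the longer input.
	result = append(result, a[i:]...)
	result = append(result, b[j:]...)
	return result
}

func main() {
	// Two shards report overlapping, sorted label names.
	fmt.Println(mergeTwo([]string{"app", "env", "job"}, []string{"env", "instance", "job"}))
	// Output: [app env instance job]
}
```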
-func mergeTwoLabelValueLists(a, b model.LabelValues) model.LabelValues { - result := make(model.LabelValues, 0, len(a)+len(b)) +func mergeTwoStringSlices(a, b []string) []string { + result := make([]string, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { if a[i] < b[j] { @@ -286,45 +301,8 @@ func mergeTwoLabelValueLists(a, b model.LabelValues) model.LabelValues { result = append(result, b[j]) j++ } else { - result = append(result, b[j]) - i++ - j++ - } - } - result = append(result, a[i:]...) - result = append(result, b[j:]...) - return result -} - -func mergeLabelNameLists(lnss []model.LabelNames) model.LabelNames { - switch len(lnss) { - case 0: - return nil - case 1: - return lnss[0] - case 2: - return mergeTwoLabelNameLists(lnss[0], lnss[1]) - default: - n := len(lnss) / 2 - left := mergeLabelNameLists(lnss[:n]) - right := mergeLabelNameLists(lnss[n:]) - return mergeTwoLabelNameLists(left, right) - } -} - -func mergeTwoLabelNameLists(a, b model.LabelNames) model.LabelNames { - result := make(model.LabelNames, 0, len(a)+len(b)) - i, j := 0, 0 - for i < len(a) && j < len(b) { - if a[i] < b[j] { result = append(result, a[i]) i++ - } else if a[i] > b[j] { - result = append(result, b[j]) - j++ - } else { - result = append(result, b[j]) - i++ j++ } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index ef31aac07335..5a4d7b3dd903 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -30,6 +30,11 @@ var ( Name: "cortex_ingester_ring_tokens_to_own", Help: "The number of tokens to own in the ring.", }) + shutdownDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "cortex_shutdown_duration_seconds", + Help: "Duration (in seconds) of cortex shutdown procedure (i.e. transfer or flush).", + Buckets: prometheus.ExponentialBuckets(10, 2, 8), // Biggest bucket is 10*2^(8-1) = 1280 seconds, or ~21 mins. + }, []string{"op", "status"}) ) // LifecyclerConfig is the config to build a Lifecycler. @@ -45,6 +50,7 @@ type LifecyclerConfig struct { ClaimOnRollout bool `yaml:"claim_on_rollout,omitempty"` NormaliseTokens bool `yaml:"normalise_tokens,omitempty"` InfNames []string `yaml:"interface_names"` + FinalSleep time.Duration `yaml:"final_sleep"` // For testing, you can override the address and ID of this ingester Addr string `yaml:"address"` @@ -63,6 +69,7 @@ func (cfg *LifecyclerConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.MinReadyDuration, "ingester.min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. 
This is to work around race conditions with ingesters exiting and updating the ring.") f.BoolVar(&cfg.ClaimOnRollout, "ingester.claim-on-rollout", false, "Send chunks to PENDING ingesters on exit.") f.BoolVar(&cfg.NormaliseTokens, "ingester.normalise-tokens", false, "Store tokens in a normalised fashion to reduce allocations.") + f.DurationVar(&cfg.FinalSleep, "ingester.final-sleep", 30*time.Second, "Duration to sleep for before exiting, to ensure metrics are scraped.") hostname, err := os.Hostname() if err != nil { @@ -454,16 +461,24 @@ func (i *Lifecycler) changeState(ctx context.Context, state IngesterState) error func (i *Lifecycler) processShutdown(ctx context.Context) { flushRequired := true if i.cfg.ClaimOnRollout { + transferStart := time.Now() if err := i.flushTransferer.TransferOut(ctx); err != nil { level.Error(util.Logger).Log("msg", "Failed to transfer chunks to another ingester", "err", err) + shutdownDuration.WithLabelValues("transfer", "fail").Observe(time.Since(transferStart).Seconds()) } else { flushRequired = false + shutdownDuration.WithLabelValues("transfer", "success").Observe(time.Since(transferStart).Seconds()) } } if flushRequired { + flushStart := time.Now() i.flushTransferer.Flush() + shutdownDuration.WithLabelValues("flush", "success").Observe(time.Since(flushStart).Seconds()) } + + // Sleep so the shutdownDuration metric can be collected. + time.Sleep(i.cfg.FinalSleep) } // unregister removes our entry from consul. diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index e67bc62a436d..df54ce305a81 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -3,18 +3,17 @@ package ring -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import strconv "strconv" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + reflect "reflect" + strconv "strconv" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -42,6 +41,7 @@ var IngesterState_name = map[int32]string{ 2: "PENDING", 3: "JOINING", } + var IngesterState_value = map[string]int32{ "ACTIVE": 0, "LEAVING": 1, @@ -50,18 +50,18 @@ var IngesterState_value = map[string]int32{ } func (IngesterState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ring_35bba6cb303d16e3, []int{0} + return fileDescriptor_7ebe6ffe1686e76b, []int{0} } type Desc struct { - Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` - Tokens []TokenDesc `protobuf:"bytes,2,rep,name=tokens" json:"tokens"` + Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tokens []TokenDesc `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens"` } func (m *Desc) Reset() { *m = Desc{} } func (*Desc) ProtoMessage() {} func (*Desc) Descriptor() ([]byte, []int) { - return fileDescriptor_ring_35bba6cb303d16e3, []int{0} + return fileDescriptor_7ebe6ffe1686e76b, []int{0} } func (m *Desc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,8 +78,8 @@ func (m *Desc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Desc) XXX_Merge(src proto.Message) { - xxx_messageInfo_Desc.Merge(dst, src) +func (m *Desc) XXX_Merge(src proto.Message) { + xxx_messageInfo_Desc.Merge(m, src) } func (m *Desc) XXX_Size() int { return m.Size() @@ -108,13 +108,13 @@ type IngesterDesc struct { Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` State IngesterState `protobuf:"varint,3,opt,name=state,proto3,enum=ring.IngesterState" json:"state,omitempty"` - Tokens []uint32 `protobuf:"varint,6,rep,packed,name=tokens" json:"tokens,omitempty"` + Tokens []uint32 `protobuf:"varint,6,rep,packed,name=tokens,proto3" json:"tokens,omitempty"` } func (m *IngesterDesc) Reset() { *m = IngesterDesc{} } func (*IngesterDesc) ProtoMessage() {} func (*IngesterDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_ring_35bba6cb303d16e3, []int{1} + return fileDescriptor_7ebe6ffe1686e76b, []int{1} } func (m *IngesterDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,8 +131,8 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *IngesterDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngesterDesc.Merge(dst, src) +func (m *IngesterDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngesterDesc.Merge(m, src) } func (m *IngesterDesc) XXX_Size() int { return m.Size() @@ -179,7 +179,7 @@ type TokenDesc struct { func (m *TokenDesc) Reset() { *m = TokenDesc{} } func (*TokenDesc) ProtoMessage() {} func (*TokenDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_ring_35bba6cb303d16e3, []int{2} + return fileDescriptor_7ebe6ffe1686e76b, []int{2} } func (m *TokenDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -196,8 +196,8 @@ func (m *TokenDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *TokenDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenDesc.Merge(dst, src) +func (m *TokenDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenDesc.Merge(m, src) } func (m *TokenDesc) XXX_Size() int { return m.Size() @@ -223,12 
+223,49 @@ func (m *TokenDesc) GetIngester() string { } func init() { + proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry") proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc") proto.RegisterType((*TokenDesc)(nil), "ring.TokenDesc") - proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value) } + +func init() { + proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_7ebe6ffe1686e76b) +} + +var fileDescriptor_7ebe6ffe1686e76b = []byte{ + // 440 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9, + 0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a, + 0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09, + 0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac, + 0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44, + 0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59, + 0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1, + 0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06, + 0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c, + 0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5, + 0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf, + 0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1, + 0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb, + 0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86, + 0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2, + 0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1, + 0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79, + 0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e, + 0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb, + 0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d, + 0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff, + 0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c, + 0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd, + 0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1, + 0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a, + 0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00, +} + func (x IngesterState) String() string { s, ok := IngesterState_name[int32(x)] if ok { @@ -435,9 +472,9 @@ func (m *Desc) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRing(dAtA, i, uint64((&v).Size())) - n1, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + n1, err1 := (&v).MarshalTo(dAtA[i:]) + if err1 != nil { + return 0, err1 } i += n1 } @@ -629,6 +666,11 @@ func (this *Desc) String() string { if this == nil { return "nil" } + repeatedStringForTokens := "[]TokenDesc{" + for _, f := range this.Tokens { + repeatedStringForTokens += strings.Replace(strings.Replace(f.String(), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + "," + } + repeatedStringForTokens += "}" keysForIngesters := make([]string, 0, len(this.Ingesters)) for k, _ := range this.Ingesters { keysForIngesters = append(keysForIngesters, k) @@ -641,7 +683,7 @@ func (this *Desc) String() string { mapStringForIngesters += "}" s := strings.Join([]string{`&Desc{`, `Ingesters:` + mapStringForIngesters + `,`, - `Tokens:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tokens), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + `,`, + `Tokens:` + repeatedStringForTokens + `,`, `}`, }, "") return s @@ -693,7 +735,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -721,7 +763,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -730,6 +772,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRing + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -750,7 +795,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -767,7 +812,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -777,6 +822,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRing + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -793,7 +841,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -802,7 +850,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthRing } if postmsgIndex > l { @@ -844,7 +892,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -853,6 +901,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRing + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -870,6 +921,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRing } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRing + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -897,7 +951,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -925,7 +979,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -935,6 +989,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRing + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -954,7 +1011,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Timestamp |= (int64(b) & 0x7F) << shift + m.Timestamp |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -973,7 +1030,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.State |= (IngesterState(b) & 0x7F) << shift + m.State |= IngesterState(b&0x7F) << shift if b < 0x80 { break } @@ -990,7 +1047,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (uint32(b) & 0x7F) << shift + v |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -1007,7 +1064,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1016,12 +1073,15 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRing + } if postIndex > l { return io.ErrUnexpectedEOF } var elementCount int var count int - for _, integer := range dAtA { + for _, integer := range dAtA[iNdEx:postIndex] { if integer < 128 { count++ } @@ -1041,7 +1101,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (uint32(b) & 0x7F) << shift + v |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -1060,6 +1120,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRing } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRing + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1087,7 +1150,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1115,7 +1178,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Token |= (uint32(b) & 0x7F) << shift + m.Token |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -1134,7 +1197,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1144,6 +1207,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRing } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRing + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,6 +1224,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRing } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRing + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1224,10 +1293,13 @@ func skipRing(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthRing } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRing + } return iNdEx, nil case 3: for { @@ -1256,6 +1328,9 @@ func skipRing(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + 
next + if iNdEx < 0 { + return 0, ErrInvalidLengthRing + } } return iNdEx, nil case 4: @@ -1274,39 +1349,3 @@ var ( ErrInvalidLengthRing = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRing = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_ring_35bba6cb303d16e3) -} - -var fileDescriptor_ring_35bba6cb303d16e3 = []byte{ - // 440 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9, - 0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a, - 0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09, - 0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac, - 0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44, - 0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59, - 0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1, - 0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06, - 0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c, - 0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5, - 0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf, - 0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1, - 0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb, - 0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86, - 0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2, - 0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1, - 0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79, - 0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e, - 0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb, - 0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d, - 0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff, - 0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c, - 0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd, - 0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1, - 0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a, - 0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go index e170382f1834..8de926d83e2c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go @@ -8,16 +8,14 @@ 
import ( "github.com/prometheus/prometheus/pkg/labels" ) -var labelNameBytes = []byte(model.MetricNameLabel) - -// MetricNameFromLabelPairs extracts the metric name from a list of LabelPairs. -func MetricNameFromLabelPairs(labels []client.LabelPair) ([]byte, error) { +// MetricNameFromLabelAdapters extracts the metric name from a list of LabelAdapters. +func MetricNameFromLabelAdapters(labels []client.LabelAdapter) (string, error) { for _, label := range labels { - if label.Name.Equal(labelNameBytes) { + if label.Name == model.MetricNameLabel { return label.Value, nil } } - return nil, fmt.Errorf("No metric name label") + return "", fmt.Errorf("No metric name label") } // MetricNameFromMetric extract the metric name from a model.Metric diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go new file mode 100644 index 000000000000..d61c99b2028e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go @@ -0,0 +1,26 @@ +package flagext + +import ( + "flag" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/go-kit/kit/log/level" +) + +type deprecatedFlag struct { + name string +} + +func (deprecatedFlag) String() string { + return "deprecated" +} + +func (d deprecatedFlag) Set(string) error { + level.Warn(util.Logger).Log("msg", "flag disabled", "flag", d.name) + return nil +} + +// DeprecatedFlag logs a warning when you try to use it. +func DeprecatedFlag(f *flag.FlagSet, name, message string) { + f.Var(deprecatedFlag{name}, name, message) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go index ba0a03801e2e..209b8b45c064 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go @@ -9,6 +9,6 @@ import "github.com/prometheus/common/model" // function we use is prone to only change a few bits for similar metrics. We // really want to make use of every change in the fingerprint to vary mutex // selection.) -func HashFP(fp model.Fingerprint) uint { - return uint(fp ^ (fp >> 32) ^ (fp >> 16)) +func HashFP(fp model.Fingerprint) uint32 { + return uint32(fp ^ (fp >> 32) ^ (fp >> 16)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go index 6460e2dd1475..908378ab4741 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go @@ -44,8 +44,14 @@ func InitLogger(cfg *server.Config) { panic(err) } - Logger = l - cfg.Log = logging.GoKit(l) + // When using util.Logger, skip 3 stack frames. + Logger = log.With(l, "caller", log.Caller(3)) + + // cfg.Log wraps the log function; skip 4 stack frames to get caller information. + // This works in Go 1.12 but not in earlier versions, where the caller + // always shows the wrapper function generated by the compiler + // (marked <autogenerated> in old versions). + cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4))) } // PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
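[Editor's note] The caller depths above are easy to get wrong, so a minimal standalone sketch of how go-kit's log.Caller depth interacts with wrapping may help (illustrative only; the wrapped helper below is hypothetical, not part of this change):

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	base := log.NewLogfmtLogger(os.Stderr)

	// Caller(3) matches a direct l.Log(...) call on a With-wrapped logger;
	// go-kit's log.DefaultCaller is exactly Caller(3).
	direct := log.With(base, "caller", log.Caller(3))
	direct.Log("msg", "caller resolves to this line")

	// Every extra wrapper function between the call site and Log adds a
	// stack frame, so it needs one more level of skipping: Caller(4).
	wrapped := func(l log.Logger, keyvals ...interface{}) error {
		return l.Log(keyvals...)
	}
	deeper := log.With(base, "caller", log.Caller(4))
	wrapped(deeper, "msg", "caller resolves to the wrapper's call site")
}
```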
@@ -68,8 +74,8 @@ func NewPrometheusLogger(l logging.Level) (log.Logger, error) { logger: logger, } - // DefaultCaller must be the last wrapper - logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + // Return a Logger without caller information; it shouldn't be used directly. + logger = log.With(logger, "ts", log.DefaultTimestampUTC) return logger, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index a2186c96a901..0837b86ab1c3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -3,8 +3,6 @@ package validation import ( "flag" "time" - - "github.com/cortexproject/cortex/pkg/util/flagext" ) // Limits describe all the limits for users; can be used to describe global default @@ -28,8 +26,10 @@ type Limits struct { MaxSeriesPerMetric int `yaml:"max_series_per_metric"` // Querier enforced limits. - MaxChunksPerQuery int `yaml:"max_chunks_per_query"` - MaxQueryLength time.Duration `yaml:"max_query_length"` + MaxChunksPerQuery int `yaml:"max_chunks_per_query"` + MaxQueryLength time.Duration `yaml:"max_query_length"` + MaxQueryParallelism int `yaml:"max_query_parallelism"` + CardinalityLimit int `yaml:"cardinality_limit"` // Config for overrides, convenient if it goes here. PerTenantOverrideConfig string @@ -55,6 +55,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.") f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.") + f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries that will be scheduled in parallel by the frontend.") + f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.") f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides.") f.DurationVar(&l.PerTenantOverridePeriod, "limits.per-user-override-period", 10*time.Second, "Period with this to reload the overrides.") @@ -65,7 +67,7 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error { // We want to set c to the defaults and then overwrite it with the input. // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML // again, we have to hide it using a type indirection. See prometheus/config. - flagext.DefaultValues(l) + *l = defaultLimits type plain Limits return unmarshal((*plain)(l)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go index ce810e69e9b7..a2553f7de813 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go @@ -18,6 +18,12 @@ var overridesReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{ Help: "Whether the last overrides reload attempt was successful.", }) +// When we load YAML from disk, we want the various per-customer limits +// to default to any values specified on the command line, not to the +// flags' built-in defaults. This global contains those values. I (Tom) cannot +// find a nicer way I'm afraid.
+var defaultLimits Limits + // Overrides periodically fetch a set of per-user overrides, and provides convenience // functions for fetching the correct value. type Overrides struct { @@ -28,7 +34,12 @@ type Overrides struct { } // NewOverrides makes a new Overrides. +// We store the supplied limits in a global variable to ensure per-tenant limits +// are defaulted to those values. As such, the last call to NewOverrides will +// become the new global defaults. func NewOverrides(defaults Limits) (*Overrides, error) { + defaultLimits = defaults + if defaults.PerTenantOverrideConfig == "" { level.Info(util.Logger).Log("msg", "per-tenant overides disabled") return &Overrides{ @@ -242,9 +253,24 @@ func (o *Overrides) MaxQueryLength(userID string) time.Duration { }) } +// MaxQueryParallelism returns the limit to the number of sub-queries the +// frontend will process in parallel. +func (o *Overrides) MaxQueryParallelism(userID string) int { + return o.getInt(userID, func(l *Limits) int { + return l.MaxQueryParallelism + }) +} + // EnforceMetricName whether to enforce the presence of a metric name. func (o *Overrides) EnforceMetricName(userID string) bool { return o.getBool(userID, func(l *Limits) bool { return l.EnforceMetricName }) } + +// CardinalityLimit returns the cardinality limit for index queries. +func (o *Overrides) CardinalityLimit(userID string) int { + return o.getInt(userID, func(l *Limits) int { + return l.CardinalityLimit + }) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 750ceb25b910..3aff0fa72d7a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -22,6 +22,9 @@ const ( errTooOld = "sample for '%s' has timestamp too old: %d" errTooNew = "sample for '%s' has timestamp too new: %d" + // ErrQueryTooLong is used in chunk store and query frontend. + ErrQueryTooLong = "invalid query, length > limit (%s > %s)" + greaterThanMaxSampleAge = "greater_than_max_sample_age" maxLabelNamesPerSeries = "max_label_names_per_series" tooFarInFuture = "too_far_in_future" @@ -48,7 +51,7 @@ func init() { } // ValidateSample returns an err if the sample is invalid. -func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client.Sample) error { +func (cfg *Overrides) ValidateSample(userID string, metricName string, s client.Sample) error { if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) { DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc() return httpgrpc.Errorf(http.StatusBadRequest, errTooOld, metricName, model.Time(s.TimestampMs)) @@ -63,8 +66,8 @@ func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client. } // ValidateLabels returns an err if the labels are invalid.
-func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error { - metricName, err := extract.MetricNameFromLabelPairs(ls) +func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelAdapter) error { + metricName, err := extract.MetricNameFromLabelAdapters(ls) if cfg.EnforceMetricName(userID) { if err != nil { return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName) @@ -78,7 +81,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error numLabelNames := len(ls) if numLabelNames > cfg.MaxLabelNamesPerSeries(userID) { DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc() - return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelPairs(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID)) + return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelAdaptersToMetric(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID)) } maxLabelNameLength := cfg.MaxLabelNameLength(userID) @@ -102,7 +105,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error } if errTemplate != "" { DiscardedSamples.WithLabelValues(reason, userID).Inc() - return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelPairs(ls).String()) + return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelAdaptersToMetric(ls).String()) } } return nil
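[Editor's note] Stepping back from the individual hunks: the defaultLimits global in override.go and the `*l = defaultLimits` assignment in Limits.UnmarshalYAML together implement a "flag defaults first, YAML overrides second" pattern. A minimal standalone sketch of that pattern under stated assumptions (the Config type and its fields are hypothetical stand-ins for validation.Limits; gopkg.in/yaml.v2 is used for its UnmarshalYAML hook):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Config stands in for validation.Limits: flag-derived defaults are
// captured in a package-level global, and YAML then overrides only
// the fields it actually sets.
type Config struct {
	MaxQueryParallelism int `yaml:"max_query_parallelism"`
	CardinalityLimit    int `yaml:"cardinality_limit"`
}

// In the real code this global is set by NewOverrides; here it is fixed.
var defaultConfig = Config{MaxQueryParallelism: 14, CardinalityLimit: 100000}

func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = defaultConfig // start from the registered defaults
	type plain Config  // type indirection avoids recursing into UnmarshalYAML
	return unmarshal((*plain)(c))
}

func main() {
	var c Config
	// Only cardinality_limit is set; max_query_parallelism keeps its default.
	if err := yaml.Unmarshal([]byte("cardinality_limit: 50000\n"), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {MaxQueryParallelism:14 CardinalityLimit:50000}
}
```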