package tsdb

import (
	"context"
	"reflect"
	"unsafe"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/oklog/ulid"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"

	storecache "github.com/thanos-io/thanos/pkg/store/cache"
	"github.com/thanos-io/thanos/pkg/tenancy"
)

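// cacheTypePostings, cacheTypeSeries and cacheTypeExpandedPostings are the
// item type label values used in the metrics and cache keys below; they are
// expected to be declared elsewhere in this package.

// InMemoryIndexCache is a thread-safe, in-memory index cache backed by
// fastcache. Items larger than maxItemSizeBytes are never stored.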
type InMemoryIndexCache struct {
	logger           log.Logger
	cache            *fastcache.Cache
	maxItemSizeBytes uint64

	added    *prometheus.CounterVec
	overflow *prometheus.CounterVec

	commonMetrics *storecache.CommonMetrics
}

// NewInMemoryIndexCacheWithConfig creates a new thread-safe cache for index entries. It relies on the cache library
// (fastcache) to ensure that the total cache size approximately does not exceed the configured MaxSize.
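//
// A minimal usage sketch (the registry and configuration values here are only
// illustrative):
//
//	cache, err := NewInMemoryIndexCacheWithConfig(
//		log.NewNopLogger(), nil, prometheus.NewRegistry(),
//		storecache.DefaultInMemoryIndexCacheConfig,
//	)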
func NewInMemoryIndexCacheWithConfig(logger log.Logger, commonMetrics *storecache.CommonMetrics, reg prometheus.Registerer, config storecache.InMemoryIndexCacheConfig) (*InMemoryIndexCache, error) {
	if config.MaxItemSize > config.MaxSize {
		return nil, errors.Errorf("max item size (%v) cannot be bigger than overall cache size (%v)", config.MaxItemSize, config.MaxSize)
	}

	// fastcache will panic if MaxSize <= 0.
	if config.MaxSize <= 0 {
		config.MaxSize = storecache.DefaultInMemoryIndexCacheConfig.MaxSize
	}

	if commonMetrics == nil {
		commonMetrics = storecache.NewCommonMetrics(reg)
	}

	c := &InMemoryIndexCache{
		logger:           logger,
		maxItemSizeBytes: uint64(config.MaxItemSize),
		commonMetrics:    commonMetrics,
	}

	c.added = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "thanos_store_index_cache_items_added_total",
		Help: "Total number of items that were added to the index cache.",
	}, []string{"item_type"})
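	// Pre-create the per-item-type child metrics (here and for the request,
	// overflow and hit counters below) so they are exported with a zero value
	// before the first increment.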
	c.added.WithLabelValues(cacheTypePostings)
	c.added.WithLabelValues(cacheTypeSeries)
	c.added.WithLabelValues(cacheTypeExpandedPostings)

	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant)
	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant)
	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant)

	c.overflow = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "thanos_store_index_cache_items_overflowed_total",
		Help: "Total number of items that could not be added to the cache due to being too big.",
	}, []string{"item_type"})
	c.overflow.WithLabelValues(cacheTypePostings)
	c.overflow.WithLabelValues(cacheTypeSeries)
	c.overflow.WithLabelValues(cacheTypeExpandedPostings)

	c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant)
	c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant)
	c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant)

	c.cache = fastcache.New(int(config.MaxSize))
	level.Info(logger).Log(
		"msg", "created in-memory index cache",
		"maxItemSizeBytes", c.maxItemSizeBytes,
		"maxSizeBytes", config.MaxSize,
	)
	return c, nil
}

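// get returns the cached value for the given key, or false if it is not
// present. GetBig/SetBig are used so that values larger than fastcache's usual
// 64 KiB per-entry limit can still be cached; a zero-length result is treated
// as a miss.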
func (c *InMemoryIndexCache) get(key storecache.CacheKey) ([]byte, bool) {
	k := yoloBuf(key.String())
	resp := c.cache.GetBig(nil, k)
	if len(resp) == 0 {
		return nil, false
	}
	return resp, true
}

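// set stores val under the given key. Existing entries are left untouched, and
// items whose combined key+value size exceeds maxItemSizeBytes are dropped and
// counted in the overflow metric.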
func (c *InMemoryIndexCache) set(typ string, key storecache.CacheKey, val []byte) {
	k := yoloBuf(key.String())
	r := c.cache.GetBig(nil, k)
	// item exists, no need to set it again.
	if r != nil {
		return
	}

	size := uint64(len(k) + len(val))
	if size > c.maxItemSizeBytes {
		level.Info(c.logger).Log(
			"msg", "item bigger than maxItemSizeBytes. Ignoring..",
			"maxItemSizeBytes", c.maxItemSizeBytes,
			"cacheType", typ,
		)
		c.overflow.WithLabelValues(typ).Inc()
		return
	}

	c.cache.SetBig(k, val)
	c.added.WithLabelValues(typ).Inc()
}

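// yoloBuf returns a byte slice aliasing the underlying bytes of s without
// copying. The result must not be modified and must not outlive s.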
func yoloBuf(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(&s))
}

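// copyString returns a copy of s backed by freshly allocated memory, detaching
// it from the original backing array (which may be a memory-mapped region).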
func copyString(s string) string {
	var b []byte
	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	h.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	h.Len = len(s)
	h.Cap = len(s)
	return string(b)
}

// copyToKey is required as the underlying strings might be memory-mapped.
func copyToKey(l labels.Label) storecache.CacheKeyPostings {
	return storecache.CacheKeyPostings(labels.Label{Value: copyString(l.Value), Name: copyString(l.Name)})
}

// StorePostings sets the postings identified by the ulid and label to the value v.
// If the postings already exist in the cache they are not mutated.
func (c *InMemoryIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) {
	c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypePostings, tenant).Observe(float64(len(v)))
	c.set(cacheTypePostings, storecache.CacheKey{Block: blockID.String(), Key: copyToKey(l)}, v)
}

// FetchMultiPostings fetches multiple postings - each identified by a label -
// and returns a map containing cache hits, along with a list of missing keys.
func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) {
	timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypePostings, tenant))
	defer timer.ObserveDuration()

	hits = map[labels.Label][]byte{}

	blockIDKey := blockID.String()
	requests := 0
	hit := 0
	for _, key := range keys {
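		// On context cancellation, record the metrics gathered so far and
		// return the partial result.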
		if ctx.Err() != nil {
			c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(requests))
			c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(hit))
			return hits, misses
		}
		requests++
		if b, ok := c.get(storecache.CacheKey{Block: blockIDKey, Key: storecache.CacheKeyPostings(key)}); ok {
			hit++
			hits[key] = b
			continue
		}

		misses = append(misses, key)
	}
	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(requests))
	c.commonMetrics.HitsTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(hit))

	return hits, misses
}

// StoreExpandedPostings stores expanded postings for a set of label matchers.
func (c *InMemoryIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) {
	c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypeExpandedPostings, tenant).Observe(float64(len(v)))
	c.set(cacheTypeExpandedPostings, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeyExpandedPostings(storecache.LabelMatchersToString(matchers))}, v)
}

// FetchExpandedPostings fetches expanded postings and returns the cached data
// along with a boolean indicating whether it was a cache hit.
func (c *InMemoryIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) {
	timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypeExpandedPostings, tenant))
	defer timer.ObserveDuration()

	if ctx.Err() != nil {
		return nil, false
	}
	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Inc()
	if b, ok := c.get(storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeyExpandedPostings(storecache.LabelMatchersToString(matchers))}); ok {
		c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Inc()
		return b, true
	}
	return nil, false
}

// StoreSeries sets the series identified by the ulid and id to the value v.
// If the series already exists in the cache it is not mutated.
func (c *InMemoryIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) {
	c.commonMetrics.DataSizeBytes.WithLabelValues(cacheTypeSeries, tenant).Observe(float64(len(v)))
	c.set(cacheTypeSeries, storecache.CacheKey{Block: blockID.String(), Key: storecache.CacheKeySeries(id)}, v)
}

// FetchMultiSeries fetches multiple series - each identified by ID - from the cache
// and returns a map containing cache hits, along with a list of missing IDs.
func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) {
	timer := prometheus.NewTimer(c.commonMetrics.FetchLatency.WithLabelValues(cacheTypeSeries, tenant))
	defer timer.ObserveDuration()

	hits = map[storage.SeriesRef][]byte{}

	blockIDKey := blockID.String()
	requests := 0
	hit := 0
	for _, id := range ids {
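		// As in FetchMultiPostings, bail out early on context cancellation,
		// still recording the metrics for the work done so far.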
		if ctx.Err() != nil {
			c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(requests))
			c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(hit))
			return hits, misses
		}
		requests++
		if b, ok := c.get(storecache.CacheKey{Block: blockIDKey, Key: storecache.CacheKeySeries(id)}); ok {
			hit++
			hits[id] = b
			continue
		}

		misses = append(misses, id)
	}
	c.commonMetrics.RequestTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(requests))
	c.commonMetrics.HitsTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(hit))

	return hits, misses
}