limits: distributor user subrings #1947
```diff
@@ -44,6 +44,7 @@ type ReadRing interface {
 	GetAll() (ReplicationSet, error)
 	ReplicationFactor() int
 	IngesterCount() int
+	Subring(key uint32, n int) (ReadRing, error)
 }

 // Operation can be Read or Write
```
```diff
@@ -377,3 +378,68 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) {
 		r.name,
 	)
 }
+
+// Subring returns a ring of n ingesters from the given ring.
+// Subrings are meant only for ingester lookup and should have their data externalized.
+func (r *Ring) Subring(key uint32, n int) (ReadRing, error) {
```

**Reviewer:** Would it make sense to use some caching here? We're going to call this on every push request, which looks like a lot of ring traversal. On the other hand, the number of different keys isn't that high (it equals the number of tenants), so perhaps we can use some of the distributor's memory to cache these subrings for some time.

**Author:** I considered that, but was concerned about cache invalidation when the ring size changes, so I decided to punt on that for now.
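For illustration, the caching idea from this thread might look roughly like the sketch below. Everything in it is hypothetical rather than part of the PR: `SubringCache`, `subringKey`, and the `gen` generation counter are invented names, and the sketch assumes the caller can supply a counter that changes whenever ring membership changes, which is one way to address the invalidation concern raised in the reply.

```go
package ring

import "sync"

// subringKey identifies a cached subring: the hash the lookup starts
// from plus the requested size. (Illustrative only; not in the PR.)
type subringKey struct {
	key uint32
	n   int
}

// SubringCache memoizes Subring results and drops them all whenever
// the underlying ring changes. The generation counter is assumed to
// be bumped on every membership change; the Ring type in this PR has
// no such field, so this is purely a sketch of the review discussion.
type SubringCache struct {
	mtx        sync.Mutex
	generation uint64
	entries    map[subringKey]ReadRing
}

func NewSubringCache() *SubringCache {
	return &SubringCache{entries: map[subringKey]ReadRing{}}
}

// Subring returns a cached subring when the ring generation matches,
// otherwise recomputes via r.Subring after clearing stale entries.
func (c *SubringCache) Subring(r *Ring, gen uint64, key uint32, n int) (ReadRing, error) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	// The ring changed since the cache was filled: throw everything away.
	if gen != c.generation {
		c.entries = map[subringKey]ReadRing{}
		c.generation = gen
	}

	k := subringKey{key: key, n: n}
	if sub, ok := c.entries[k]; ok {
		return sub, nil
	}

	sub, err := r.Subring(key, n)
	if err != nil {
		return nil, err
	}
	c.entries[k] = sub
	return sub, nil
}
```

Passing the generation in from the caller keeps the sketch decoupled from the Ring internals; a real implementation could instead invalidate from wherever the ring learns about membership changes. Returning to the PR, the rest of `Subring`: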
```diff
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+	if r.ringDesc == nil || len(r.ringTokens) == 0 || n <= 0 {
+		return nil, ErrEmptyRing
+	}
+
+	var (
+		ingesters     = make(map[string]IngesterDesc, n)
+		distinctHosts = map[string]struct{}{}
+		start         = r.search(key)
+		iterations    = 0
+	)
+
+	// If the requested subring exceeds the number of ingesters,
+	// cap it at the total ring size.
+	if n > len(r.ringDesc.Ingesters) {
+		n = len(r.ringDesc.Ingesters)
+	}
+
+	for i := start; len(distinctHosts) < n && iterations < len(r.ringTokens); i++ {
+		iterations++
+		// Wrap i around in the ring.
+		i %= len(r.ringTokens)
+
+		// We want n *distinct* ingesters.
+		token := r.ringTokens[i]
+		if _, ok := distinctHosts[token.Ingester]; ok {
+			continue
+		}
+		distinctHosts[token.Ingester] = struct{}{}
+		ingester := r.ringDesc.Ingesters[token.Ingester]
+
+		ingesters[token.Ingester] = ingester
+	}
+
+	if n > len(ingesters) {
+		return nil, fmt.Errorf("too few ingesters found")
+	}
+
+	numTokens := 0
+	for _, ing := range ingesters {
+		numTokens += len(ing.Tokens)
+	}
+
+	sub := &Ring{
+		name: "subring",
+		cfg:  r.cfg,
+		ringDesc: &Desc{
+			Ingesters: ingesters,
+		},
+		ringTokens: make([]TokenDesc, 0, numTokens),
+	}
+
+	// Add tokens for the ingesters in the subring; they are already
+	// sorted in the parent ring, so there is no need to re-sort.
+	for _, t := range r.ringTokens {
+		if _, ok := ingesters[t.Ingester]; ok {
+			sub.ringTokens = append(sub.ringTokens, t)
+		}
+	}
+
+	return sub, nil
+}
```
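To see where this fits, the distributor side (not shown in this diff) would derive the ring key from the tenant ID and the subring size from a per-user limit. Below is a hedged sketch of that call pattern; `ShardByUser` and `lookupTenantSubring` are hypothetical names invented for illustration, and only `Subring(key, n)` itself comes from this PR.

```go
package ring

import "hash/fnv"

// ShardByUser hashes a tenant ID into the uint32 key space that
// Subring starts its search from. Hypothetical helper; the real
// distributor wiring is outside this diff.
func ShardByUser(userID string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(userID)) // fnv hashes never return an error
	return h.Sum32()
}

// lookupTenantSubring restricts a tenant to n ingesters. The value
// of n is assumed to come from a per-tenant limit configured on the
// distributor (hence the "limits:" prefix in the PR title).
func lookupTenantSubring(r *Ring, userID string, n int) (ReadRing, error) {
	return r.Subring(ShardByUser(userID), n)
}
```

Because the same user ID always hashes to the same key, a tenant keeps landing on the same n ingesters until ring membership changes, which is what makes per-tenant subrings useful for limiting how widely a tenant's series spread.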