Make DictionaryCache have better expiry properties #13292
Changes from 36 commits
New file (changelog entry):

@@ -0,0 +1 @@
+Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently.
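To make the intended behaviour concrete, here is a minimal, self-contained sketch of the core idea (illustrative only, not code from this PR): store the full dict and each individual dict key as separate entries in a single LRU, so the full-dict entry can be evicted even while individual keys stay hot. The toy `OrderedDict`-based LRU and the `FULL` marker are stand-ins for Synapse's `LruCache` and `_FullCacheKey.KEY`.

```python
from collections import OrderedDict

FULL = object()  # stand-in for the PR's _FullCacheKey.KEY

lru: "OrderedDict[tuple, object]" = OrderedDict()
CAPACITY = 3  # tiny capacity so eviction is easy to see

def touch(key: tuple, value: object = None) -> None:
    """Insert or refresh an entry, evicting the oldest when over capacity."""
    if key not in lru:
        lru[key] = value
    lru.move_to_end(key)
    while len(lru) > CAPACITY:
        lru.popitem(last=False)

# Cache the full dict and two per-key entries under cache key 1.
touch((1, FULL), {"a": 1, "b": 2})
touch((1, "a"), 1)
touch((1, "b"), 2)

# Querying individual keys refreshes only the per-key entries...
touch((1, "a"))
touch((1, "b"))

# ...so the next insertion evicts the least-recently-used entry: the
# full dict. This is exactly the expiry property the changelog describes.
touch((2, "c"), 3)
assert (1, FULL) not in lru and (1, "a") in lru and (1, "b") in lru
```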
Diff of the `DictionaryCache` implementation:
@@ -14,11 +14,13 @@
 import enum
 import logging
 import threading
-from typing import Any, Dict, Generic, Iterable, Optional, Set, TypeVar
+from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union

 import attr
+from typing_extensions import Literal

 from synapse.util.caches.lrucache import LruCache
+from synapse.util.caches.treecache import TreeCache

 logger = logging.getLogger(__name__)

@@ -33,10 +35,12 @@

 # This class can't be generic because it uses slots with attrs.
 # See: https://github.com/python-attrs/attrs/issues/313
-@attr.s(slots=True, auto_attribs=True)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class DictionaryEntry:  # should be: Generic[DKT, DV].
     """Returned when getting an entry from the cache

+    If `full` is true then `known_absent` will be the empty set.
+
     Attributes:
         full: Whether the cache has the full or dict or just some keys.
             If not full then not all requested keys will necessarily be present

@@ -53,20 +57,90 @@ def __len__(self) -> int:
         return len(self.value)


+class _FullCacheKey(enum.Enum):
+    """The key we use to cache the full dict."""
+
+    KEY = object()
+
+
 class _Sentinel(enum.Enum):
     # defining a sentinel in this way allows mypy to correctly handle the
     # type of a dictionary lookup.
     sentinel = object()


+class _PerKeyValue(Generic[DV]):
+    """The cached value of a dictionary key. If `value` is the sentinel,
+    indicates that the requested key is known to *not* be in the full dict.
+    """
+
+    __slots__ = ["value"]
+
+    def __init__(self, value: Union[DV, Literal[_Sentinel.sentinel]]) -> None:
+        self.value = value
+
+    def __len__(self) -> int:
+        # We add a `__len__` implementation as we use this class in a cache
+        # where the values are variable length.
+        return 1
+
+
 class DictionaryCache(Generic[KT, DKT, DV]):
     """Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
     fetching a subset of dictionary keys for a particular key.
+
+    This cache has two levels of key. First there is the "cache key" (of type
+    `KT`), which maps to a dict. The keys to that dict are the "dict key" (of
+    type `DKT`). The overall structure is therefore `KT->DKT->DV`. For
+    example, it might look like:
+
+        {
+            1: { 1: "a", 2: "b" },
+            2: { 1: "c" },
+        }
+
+    It is possible to look up either individual dict keys, or the *complete*
+    dict for a given cache key.
+
+    Each dict item, and the complete dict is treated as a separate LRU
+    entry for the purpose of cache expiry. For example, given:
+        dict_cache.get(1, None)  -> DictionaryEntry({1: "a", 2: "b"})
+        dict_cache.get(1, [1])  -> DictionaryEntry({1: "a"})
+        dict_cache.get(1, [2])  -> DictionaryEntry({2: "b"})
+
+    ... then the cache entry for the complete dict will expire first,
+    followed by the cache entry for the '1' dict key, and finally that
+    for the '2' dict key.
     """

     def __init__(self, name: str, max_entries: int = 1000):
-        self.cache: LruCache[KT, DictionaryEntry] = LruCache(
-            max_size=max_entries, cache_name=name, size_callback=len
+        # We use a single LruCache to store two different types of entries:
+        #   1. Map from (key, dict_key) -> dict value (or sentinel, indicating
+        #      the key doesn't exist in the dict); and
+        #   2. Map from (key, _FullCacheKey.KEY) -> full dict.
+        #
+        # The former is used when explicit keys of the dictionary are looked up,
+        # and the latter when the full dictionary is requested.
+        #
+        # If when explicit keys are requested and not in the cache, we then look
+        # to see if we have the full dict and use that if we do. If found in the
+        # full dict each key is added into the cache.
+        #
+        # This set up allows the `LruCache` to prune the full dict entries if
+        # they haven't been used in a while, even when there have been recent
+        # queries for subsets of the dict.
+        #
+        # Typing:
+        #   * A key of `(KT, DKT)` has a value of `_PerKeyValue`
+        #   * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]`
+        self.cache: LruCache[
+            Tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]],
+            Union[_PerKeyValue, Dict[DKT, DV]],
+        ] = LruCache(
+            max_size=max_entries,
+            cache_name=name,
+            cache_type=TreeCache,
+            size_callback=len,
         )

         self.name = name

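One subtlety worth noting: with `size_callback=len`, an entry's weight against `max_entries` is its `len()`. A full-dict entry therefore weighs its number of items, while `_PerKeyValue.__len__` pins every per-key entry at a weight of 1. A small illustration (hypothetical values; it assumes `_PerKeyValue` from the diff above is in scope):

```python
# Weight of each entry type under size_callback=len (hypothetical values).
full_dict_entry = {"a": 1, "b": 2, "c": 3}  # value at (key, _FullCacheKey.KEY)
per_key_entry = _PerKeyValue(1)             # value at (key, "a")

assert len(full_dict_entry) == 3  # a full dict counts as 3 towards max_entries
assert len(per_key_entry) == 1    # each individually cached key counts as 1
```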
@@ -96,18 +170,74 @@ def get(
         Returns:
             DictionaryEntry
         """
-        entry = self.cache.get(key, _Sentinel.sentinel)
-        if entry is not _Sentinel.sentinel:
-            if dict_keys is None:
-                return DictionaryEntry(
-                    entry.full, entry.known_absent, dict(entry.value)
-                )
-            else:
-                return DictionaryEntry(
-                    entry.full,
-                    entry.known_absent,
-                    {k: entry.value[k] for k in dict_keys if k in entry.value},
-                )
+        if dict_keys is None:
+            # The caller wants the full set of dictionary keys for this cache key
+            return self._get_full_dict(key)
+
+        # We are being asked for a subset of keys.
+
+        # First go and check for each requested dict key in the cache, tracking
+        # which we couldn't find.
+        values = {}
+        known_absent = set()
+        missing = []
+        for dict_key in dict_keys:
+            entry = self.cache.get((key, dict_key), _Sentinel.sentinel)
+            if entry is _Sentinel.sentinel:
+                missing.append(dict_key)
+                continue
+
+            assert isinstance(entry, _PerKeyValue)
+
+            if entry.value is _Sentinel.sentinel:
+                known_absent.add(dict_key)
+            else:
+                values[dict_key] = entry.value
+
+        # If we found everything we can return immediately.
+        if not missing:
+            return DictionaryEntry(False, known_absent, values)
+
+        # We are missing some keys, so check if we happen to have the full dict in
+        # the cache.
+        #
+        # We don't update the last access time for this cache fetch, as we
+        # aren't explicitly interested in the full dict and so we don't want
+        # requests for explicit dict keys to keep the full dict in the cache.
+        entry = self.cache.get(
+            (key, _FullCacheKey.KEY),
+            _Sentinel.sentinel,
+            update_last_access=False,
+        )
+        if entry is _Sentinel.sentinel:
+            # Not in the cache, return the subset of keys we found.
+            return DictionaryEntry(False, known_absent, values)
+
+        # We have the full dict!
+        assert isinstance(entry, dict)
+
+        for dict_key in missing:
+            # We explicitly add each dict key to the cache, so that cache hit
+            # rates and LRU times for each key can be tracked separately.
+            value = entry.get(dict_key, _Sentinel.sentinel)  # type: ignore[arg-type]
+            self.cache[(key, dict_key)] = _PerKeyValue(value)
+
+            if value is not _Sentinel.sentinel:
+                values[dict_key] = value
+
+        return DictionaryEntry(True, set(), values)
+
+    def _get_full_dict(
+        self,
+        key: KT,
+    ) -> DictionaryEntry:
+        """Fetch the full dict for the given key."""
+
+        # First we check if we have cached the full dict.
+        entry = self.cache.get((key, _FullCacheKey.KEY), _Sentinel.sentinel)
+        if entry is not _Sentinel.sentinel:
+            assert isinstance(entry, dict)
+            return DictionaryEntry(True, set(), entry)
+
+        return DictionaryEntry(False, set(), {})

[Review thread on the `Returns:` docstring, resolved]
  Reviewer: this doesn't document what happens if …
  Reviewer: … please? I think it returns some fixed shape of DictionaryEntry?
  Author: Ah, I'd put some docs under the …

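Reading the new lookup path end to end, the intended behaviour appears to be as follows. This is a hedged walkthrough inferred from the diff, not executed code; it assumes `update()` keeps its existing `(sequence, key, value, fetched_keys=None)` signature and the cache's `sequence` attribute.

```python
cache: DictionaryCache = DictionaryCache("example", max_entries=100)
cache.update(cache.sequence, 1, {"a": 1, "b": 2})  # cache the full dict for key 1

# First lookup: per-key entries for "a" and "z" miss, so the full dict is
# consulted (without refreshing its LRU position, thanks to
# update_last_access=False), and both keys are written back as per-key
# entries, "z" as the sentinel, i.e. "known absent".
entry = cache.get(1, ["a", "z"])
assert entry.full                # answered from the full dict
assert entry.value == {"a": 1}

# Second identical lookup: now served purely from the per-key entries.
entry = cache.get(1, ["a", "z"])
assert not entry.full            # no need to touch the full dict again
assert entry.value == {"a": 1}
assert entry.known_absent == {"z"}
```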
@@ -117,7 +247,13 @@ def invalidate(self, key: KT) -> None:
         # Increment the sequence number so that any SELECT statements that
         # raced with the INSERT don't update the cache (SYN-369)
         self.sequence += 1
-        self.cache.pop(key, None)
+
+        # We want to drop all information about the dict for the given key, so
+        # we use `del_multi` to delete it all in one go.
+        #
+        # We ignore the type error here: `del_multi` accepts a truncated key
+        # (when the key type is a tuple).
+        self.cache.del_multi((key,))  # type: ignore[arg-type]

     def invalidate_all(self) -> None:
         self.check_thread()

[Review thread on the `del_multi` call, resolved]
  Reviewer: so how did this ever work before?
  Author: It wasn't a …
  Reviewer: oh gosh, I'd completely missed that.

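`del_multi` with a truncated key works because the cache is now backed by a `TreeCache`, which stores tuple keys as a tree, so a key prefix names a whole subtree. A minimal standalone sketch of that idea, using a plain nested dict (this is not Synapse's actual `TreeCache` code):

```python
# Toy nested-dict tree keyed by tuples, mimicking truncated-key deletion.
tree: dict = {}

def tree_set(key: tuple, value: object) -> None:
    """Store `value` under the tuple `key`, one tree level per element."""
    node = tree
    for part in key[:-1]:
        node = node.setdefault(part, {})
    node[key[-1]] = value

def tree_del_prefix(prefix: tuple) -> None:
    """Delete every entry whose key starts with `prefix`."""
    node = tree
    for part in prefix[:-1]:
        node = node[part]
    node.pop(prefix[-1], None)  # drops the whole subtree in one go

tree_set((1, "a"), 1)
tree_set((1, "full"), {"a": 1})
tree_set((2, "c"), 3)

tree_del_prefix((1,))  # drops both (1, "a") and (1, "full")
assert tree == {2: {"c": 3}}
```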
@@ -149,20 +285,27 @@ def update(
             # Only update the cache if the caches sequence number matches the
             # number that the cache had before the SELECT was started (SYN-369)
             if fetched_keys is None:
-                self._insert(key, value, set())
+                self.cache[(key, _FullCacheKey.KEY)] = value
             else:
-                self._update_or_insert(key, value, fetched_keys)
+                self._update_subset(key, value, fetched_keys)

-    def _update_or_insert(
-        self, key: KT, value: Dict[DKT, DV], known_absent: Iterable[DKT]
+    def _update_subset(
+        self, key: KT, value: Dict[DKT, DV], fetched_keys: Iterable[DKT]
     ) -> None:
-        # We pop and reinsert as we need to tell the cache the size may have
-        # changed
-        entry: DictionaryEntry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
-        entry.value.update(value)
-        entry.known_absent.update(known_absent)
-        self.cache[key] = entry
+        """Add the given dictionary values as explicit keys in the cache.
+
+        Args:
+            key: top-level cache key
+            value: The dictionary with all the values that we should cache
+            fetched_keys: The full set of dict keys that were looked up. Any keys
+                here not in `value` should be marked as "known absent".
+        """
+
+        for dict_key, dict_value in value.items():
+            self.cache[(key, dict_key)] = _PerKeyValue(dict_value)
+
+        for dict_key in fetched_keys:
+            if dict_key in value:
+                continue
+
+            self.cache[(key, dict_key)] = _PerKeyValue(_Sentinel.sentinel)

-    def _insert(self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]) -> None:
-        self.cache[key] = DictionaryEntry(True, known_absent, value)

[Review thread on caching the full dict, resolved]
  Reviewer: do we not need to update (or at least invalidate) entries for individual dict keys here as well?
  Author: I don't think so, if the value in the DB has changed then we should have previously called …
  Reviewer: fair enough. That seems like an assumption that we should document in the docstring though. (Also: shouldn't this method be called …)
  Author: Yup, will try and put up a separate PR to do some of these fixes if they're small.
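A hedged usage sketch of the new `_update_subset` path, inferred from the diff (again assuming `update()` keeps its existing `(sequence, key, value, fetched_keys)` signature): suppose a database fetch for keys `"a"` and `"b"` only found a value for `"a"`.

```python
cache: DictionaryCache = DictionaryCache("example", max_entries=100)

# Record that we looked up {"a", "b"} but only "a" had a value.
cache.update(cache.sequence, 1, {"a": 1}, fetched_keys={"a", "b"})

# Per the diff, the cache now holds:
#   (1, "a") -> _PerKeyValue(1)                   # present value
#   (1, "b") -> _PerKeyValue(_Sentinel.sentinel)  # known absent
# so a later lookup can answer "b is absent" without hitting the database.
entry = cache.get(1, ["a", "b"])
assert entry.value == {"a": 1}
assert entry.known_absent == {"b"}
```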
[Review comment]
  Reviewer: ideally we'd separate this into a different PR; it's not directly related to the changes to DictionaryCache.