Skip to content
  • Rate limit · GitHub

    Access has been restricted

    You have triggered a rate limit.

    Please wait a few minutes before you try again;
    in some cases this may take up to an hour.

  • Notifications You must be signed in to change notification settings
  • Fork 196
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add a make_cache_key parameter #159

Merged
merged 3 commits into from
May 31, 2020
Rate limit · GitHub

Access has been restricted

You have triggered a rate limit.

Please wait a few minutes before you try again;
in some cases this may take up to an hour.

Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
refactor cached decorator to make it consistent with memoized
library consumers can now set their own make_cache_key
attribute of the returned cache decorator, accommodating use cases such as
the one in Issue 71 without even needing to change the public API since
this attribute has been documented read/write for years.
Rate limit · GitHub

Access has been restricted

You have triggered a rate limit.

Please wait a few minutes before you try again;
in some cases this may take up to an hour.

buckley-w-david committed Jan 29, 2020
commit 0ae838a5fea850268075eadf621611fe14f2ca2f
30 changes: 15 additions & 15 deletions flask_caching/__init__.py
Original file line number Diff line number Diff line change
@@ -333,6 +333,8 @@ def get_list():
**make_cache_key**
A function used to generate the cache key.

readable and writable

:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.

@@ -390,12 +392,7 @@ def decorated_function(*args, **kwargs):
return f(*args, **kwargs)

try:
if query_string:
cache_key = _make_cache_key_query_string()
else:
cache_key = _make_cache_key(
args, kwargs, use_request=True
)
cache_key = decorated_function.make_cache_key(args, kwargs, use_request=True)

if (
callable(forced_update)
@@ -487,17 +484,20 @@ def _make_cache_key_query_string():
return cache_key

def _make_cache_key(args, kwargs, use_request):
if callable(key_prefix):
cache_key = key_prefix()
elif "%s" in key_prefix:
if use_request:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix % url_for(f.__name__, **kwargs)
if query_string:
return _make_cache_key_query_string()
else:
cache_key = key_prefix
if callable(key_prefix):
cache_key = key_prefix()
elif "%s" in key_prefix:
if use_request:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix % url_for(f.__name__, **kwargs)
else:
cache_key = key_prefix

return cache_key
return cache_key

decorated_function.uncached = f
decorated_function.cache_timeout = timeout
58 changes: 58 additions & 0 deletions tests/test_view.py
Original file line number Diff line number Diff line change
@@ -332,3 +332,61 @@ def view_works():
# ... making sure that different query parameter values
# don't yield the same cache!
assert not third_time == second_time


def test_generate_cache_key_from_request_body(app, cache):
    """Test a user-supplied cache key maker.

    Create three requests to verify that the same request body
    always references the same cache entry.

    Also test to make sure that the same cache isn't being reused for
    any/all request bodies.

    Caching functionality is verified by a `@cached` route `/works` which
    produces a time in its response. The time in the response can verify
    that two requests with the same request body produce responses with
    the same time.
    """

    def _make_cache_key_request_body():
        """Create keys based on request body."""
        # Hash the request body so it can be used as a key for the cache.
        request_body = request.get_data(as_text=False)
        hashed_body = str(hashlib.md5(request_body).hexdigest())
        cache_key = request.path + hashed_body
        return cache_key

    # Install the custom key maker via the documented read/write
    # `make_cache_key` attribute of the returned decorator.
    cache_decorator = cache.cached()
    cache_decorator.make_cache_key = _make_cache_key_request_body

    @app.route('/works', methods=['POST'])
    @cache_decorator
    def view_works():
        return str(time.time()) + request.get_data().decode()

    tc = app.test_client()

    # Make our request...
    first_response = tc.post(
        '/works', data=dict(mock=True, value=1, test=2)
    )
    first_time = first_response.get_data(as_text=True)

    # Make the same request again...
    second_response = tc.post(
        '/works', data=dict(mock=True, value=1, test=2)
    )
    second_time = second_response.get_data(as_text=True)

    # Now make sure the time for the first and second
    # requests are the same!
    assert second_time == first_time

    # Last/third request with a different body should produce a
    # different time.
    # FIX: the original used `tc.get('/v1/works', ...)`, which targets a
    # nonexistent route with the wrong method; the resulting 404 body
    # trivially differs from the cached response, so the assertion passed
    # without ever exercising the custom cache key. POST to the real
    # route with a different body instead.
    third_response = tc.post(
        '/works', data=dict(mock=True, value=2, test=3)
    )
    third_time = third_response.get_data(as_text=True)

    # ... making sure that different request bodies
    # don't yield the same cache!
    assert not third_time == second_time