diff --git a/.gitignore b/.gitignore index ee73fd369..ef2407e15 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ t/servroot* lua-resty-mlcache-*/ *.tar.gz *.rock +work/ diff --git a/.travis.yml b/.travis.yml index 56c81effb..94549ad85 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,17 +18,18 @@ cache: env: global: - JOBS=2 - - LUAROCKS=3.2.1 + - LUAROCKS=3.4.0 - TEST_NGINX_RANDOMIZE=1 matrix: - - OPENRESTY=1.15.8.2 + - OPENRESTY=1.19.3.1 + - OPENRESTY=1.17.8.2 + - OPENRESTY=1.17.8.1 + - OPENRESTY=1.15.8.3 - OPENRESTY=1.15.8.1 - OPENRESTY=1.13.6.2 - OPENRESTY=1.13.6.1 - OPENRESTY=1.11.2.5 - - OPENRESTY=1.11.2.4 - - OPENRESTY=1.11.2.3 - - OPENRESTY=1.11.2.2 + - OPENRESTY=1.11.2.1 before_install: - mkdir -p download-cache diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dc22fd54..4e9838efe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +- [2.5.0](#2.5.0) - [2.4.1](#2.4.1) - [2.4.0](#2.4.0) - [2.3.0](#2.3.0) @@ -12,6 +13,32 @@ - [1.0.1](#1.0.1) - [1.0.0](#1.0.0) +## [2.5.0] + +> Released on: 2020/11/18 + +#### Added + +- `get()` callback functions are now optional. Without a callback, `get()` now + still performs on-cpu L1/L2 lookups (no yielding). This allows implementing + new cache lookup patterns guaranteed to be on-cpu for a more constant, + smoother latency tail end (e.g. values are refreshed in background timers with + `set()`). + Thanks Hamish Forbes and Corina Purcarea for proposing this feature and + participating in its development! + [#96](https://github.com/thibaultcha/lua-resty-mlcache/pull/96) + +#### Fixed + +- Improve `update()` robustness to worker crashes. Now, the library behind + `cache:update()` is much more robust to re-spawned workers when initialized in + the `init_by_lua` phase. + [#97](https://github.com/thibaultcha/lua-resty-mlcache/pull/97) +- Document the `peek()` method `stale` argument which was not mentioned, as well + as the possibility of negative TTL return values for expired items. + +[Back to TOC](#table-of-contents) + ## [2.4.1] > Released on: 2020/01/17 @@ -215,6 +242,7 @@ Initial release. [Back to TOC](#table-of-contents) +[2.5.0]: https://github.com/thibaultcha/lua-resty-mlcache/compare/2.4.1...2.5.0 [2.4.1]: https://github.com/thibaultcha/lua-resty-mlcache/compare/2.4.0...2.4.1 [2.4.0]: https://github.com/thibaultcha/lua-resty-mlcache/compare/2.3.0...2.4.0 [2.3.0]: https://github.com/thibaultcha/lua-resty-mlcache/compare/2.2.1...2.3.0 diff --git a/README.md b/README.md index e2c6e8b6f..1b49989c6 100644 --- a/README.md +++ b/README.md @@ -159,14 +159,11 @@ Tests matrix results: | OpenResty | Compatibility |------------:|:--------------------| | < | not tested -| `1.11.2.2` | :heavy_check_mark: -| `1.11.2.3` | :heavy_check_mark: -| `1.11.2.4` | :heavy_check_mark: -| `1.11.2.5` | :heavy_check_mark: -| `1.13.6.1` | :heavy_check_mark: -| `1.13.6.2` | :heavy_check_mark: -| `1.15.8.1` | :heavy_check_mark: -| `1.15.8.2` | :heavy_check_mark: +| `1.11.2.x` | :heavy_check_mark: +| `1.13.6.x` | :heavy_check_mark: +| `1.15.8.x` | :heavy_check_mark: +| `1.17.8.x` | :heavy_check_mark: +| `1.19.3.x` | :heavy_check_mark: | > | not tested [Back to TOC](#table-of-contents) @@ -333,16 +330,16 @@ Lua VM will be created for every request. get --- -**syntax:** `value, err, hit_level = cache:get(key, opts?, callback, ...)` +**syntax:** `value, err, hit_level = cache:get(key, opts?, callback?, ...)` Perform a cache lookup. This is the primary and most efficient method of this module. 
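For orientation before the detailed description below, here is a minimal illustrative sketch of the two lookup styles `get()` supports with this change, with and without a callback. This sketch is editorial and not part of the patch: the `cache_shm` shared dict and the `fetch_from_db()` helper are assumptions standing in for a real configuration and L3 lookup.

```lua
-- Illustrative sketch only (not part of this patch). Assumes a
-- `lua_shared_dict cache_shm 64m;` block; fetch_from_db() is a stand-in
-- for a real L3 lookup.
local mlcache = require "resty.mlcache"

local cache = assert(mlcache.new("my_mlcache", "cache_shm"))

local function fetch_from_db(id)
    -- hypothetical database lookup
    return { id = id, name = "John" }
end

-- with a callback: the L3 callback only runs on an L1/L2 miss
local user, err = cache:get("users:123", nil, fetch_from_db, 123)
if err then
    ngx.log(ngx.ERR, "could not fetch user: ", err)
    return
end

-- since 2.5.0, without a callback: on-cpu L1/L2 lookup only (no yielding)
local cached, err, hit_lvl = cache:get("users:123")
if cached == nil and hit_lvl == -1 then
    -- not in the cache at all (miss); e.g. refresh it from a background
    -- timer with set()
end
```

The complete, self-contained example further down in this section remains the reference usage.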
 A typical pattern is to *not* call [set()](#set), and let [get()](#get)
 perform all the work.
 
-When it succeeds, it returns `value` and no error. **Because `nil` values from
-the L3 callback are cached to signify misses, `value` can be nil, hence one
-must rely on the second return value `err` to determine if this method
-succeeded or not**.
+When this method succeeds, it returns `value` and no error. **Because `nil`
+values from the L3 callback can be cached (i.e. "negative caching"), `value` can
+be nil even though it is already cached. Hence, one must rely on the second
+return value `err` to determine if this method succeeded or not**.
 
 The third return value is a number which is set if no error was encountered. It
 indicates the level at which the value was fetched: `1` for L1, `2` for L2,
@@ -410,8 +407,8 @@ options:
       tables, cdata objects, loading new Lua code, etc...
     **Default:** inherited from the instance.
 
-The third argument `callback` **must** be a function. Its signature and return
-values are documented in the following example:
+The third argument `callback` is optional. If provided, it must be a function
+whose signature and return values are documented in the following example:
 
 ```lua
 -- arg1, arg2, and arg3 are arguments forwarded to the callback from the
@@ -433,11 +430,29 @@ local function callback(arg1, arg2, arg3)
 end
 ```
 
-This function is allowed to throw Lua errors as it runs in protected mode. Such
-errors thrown from the callback will be returned as strings in the second
-return value `err`.
+The provided `callback` function is allowed to throw Lua errors as it runs in
+protected mode. Such errors thrown from the callback will be returned as strings
+in the second return value `err`.
 
-When called, `get()` follows the below logic:
+If `callback` is not provided, `get()` will still look up the requested key in
+the L1 and L2 caches and return it if found. If no value is found in the cache
+**and** no callback is provided, `get()` will return `nil, nil, -1`, where `-1`
+signifies a **cache miss** (no value). This is not to be confused with return
+values such as `nil, nil, 1`, where `1` signifies a **negative cached item**
+found in L1 (cached `nil`).
+
+```lua
+local value, err, hit_lvl = cache:get("key")
+if value == nil then
+    if hit_lvl == -1 then
+        -- miss (no value)
+    end
+
+    -- negative hit (cached `nil`)
+end
+```
+
+When provided a callback, `get()` follows the below logic:
 
 1. query the L1 cache (lua-resty-lrucache instance). This cache lives in the
    Lua VM, and as such, it is the most efficient one to query.
@@ -465,7 +480,9 @@ When called, `get()` follows the below logic:
    are unlocked and read the value from the L2 cache (they do not run the L3
    callback) and return it.
 
-Example:
+When no callback is provided, `get()` will only execute steps 1. and 2.
+
+Here is a complete usage example:
 
 ```lua
 local mlcache = require "mlcache"
@@ -504,8 +521,8 @@ else
 end
 ```
 
-This second example is a modification of the above one, in which we apply
-some transformation to the retrieved `user` record, and cache it via the
+This second example is similar to the one above, but here we apply some
+transformation to the retrieved `user` record before caching it via the
 `l1_serializer` callback:
 
 ```lua
@@ -701,31 +718,35 @@ end
 peek
 ----
 
-**syntax:** `ttl, err, value = cache:peek(key)`
+**syntax:** `ttl, err, value = cache:peek(key, stale?)`
 
 Peek into the L2 (`lua_shared_dict`) cache.
 
-The first and only argument `key` is a string, and it is the key to lookup.
+The first argument `key` is a string which is the key to look up in the cache.
+
+The second argument `stale` is optional. If `true`, then `peek()` will consider
+stale values as cached values. If not provided, `peek()` will consider stale
+values as if they were not in the cache.
 
 This method returns `nil` and a string describing the error upon failure.
 
-Upon success, but if there is no such value for the queried `key`, it returns
-`nil` as its first argument, and no error. The same applies to cached misses
-looked up with this function.
+If there is no value for the queried `key`, it returns `nil` and no error.
 
-Upon success, and if there is such a value for the queried `key`, it returns a
-number indicating the remaining TTL of the cached value. The third returned
-value in that case will be the cached value itself, for convenience.
+If there is a value for the queried `key`, it returns a number indicating the
+remaining TTL of the cached value (in seconds) and no error. If the value for
+`key` has expired but is still in the L2 cache, the returned TTL will be
+negative. Finally, the third returned value in that case will be the cached
+value itself, for convenience.
 
-This method is useful if you want to know whether a value is cached or not. A
-value stored in the L2 cache is considered cached, regardless of whether or not
-it is also set in the L1 cache of the worker. That is because the L1 cache is
-too volatile (as its size unit is a number of slots), and the L2 cache is
+This method is useful when you want to determine if a value is cached. A value
+stored in the L2 cache is considered cached regardless of whether or not it is
+also set in the L1 cache of the worker. That is because the L1 cache is
+considered volatile (as its size unit is a number of slots), and the L2 cache is
 still several orders of magnitude faster than the L3 callback anyway.
 
 As its only intent is to take a "peek" into the cache to determine its warmth
 for a given value, `peek()` does not count as a query like [get()](#get), and
-does not set the value in the L1 cache.
+does not promote the value to the L1 cache.
 
 Example:
 
@@ -761,6 +782,11 @@ ngx.say(ttl) -- 3
 ngx.say(value) -- "some value"
 ```
 
+**Note:** since mlcache `2.5.0`, it is also possible to call [get()](#get)
+without a callback function in order to "query" the cache. Unlike `peek()`, a
+`get()` call with no callback *will* promote the value to the L1 cache, and
+*will not* return its TTL.
+
 [Back to TOC](#table-of-contents)
 
 set
 ---
@@ -865,7 +891,7 @@ request, to make sure they refreshed their L1 cache.
 update
 ------
 
-**syntax:** `ok, err = cache:update()`
+**syntax:** `ok, err = cache:update(timeout?)`
 
 Poll and execute pending cache invalidation events published by other workers.
 
@@ -881,6 +907,12 @@ This method allows a worker to update its L1 cache (by purging values
 considered stale due to another worker calling `set()`, `delete()`, or
 `purge()`) before processing a request.
 
+This method accepts an optional `timeout` argument in seconds, which defaults
+to `0.3` (300ms). The update operation will time out if it is not done before
+this threshold is reached. This prevents `update()` from staying on the CPU for
+too long when there are too many events to process. Since the system is
+eventually consistent, remaining events will be processed by a subsequent call.
+
 A typical design pattern is to call `update()` **only once** before each
 request processing.
This allows your hot code paths to perform a single shm access in the best case scenario: no invalidation events were received, all diff --git a/lib/resty/mlcache.lua b/lib/resty/mlcache.lua index 586cf9616..de8eb7417 100644 --- a/lib/resty/mlcache.lua +++ b/lib/resty/mlcache.lua @@ -164,7 +164,7 @@ end local _M = { - _VERSION = "2.4.1", + _VERSION = "2.5.0", _AUTHOR = "Thibault Charbonnier", _LICENSE = "MIT", _URL = "https://github.com/thibaultcha/lua-resty-mlcache", @@ -821,8 +821,8 @@ function _M:get(key, opts, cb, ...) error("key must be a string", 2) end - if type(cb) ~= "function" then - error("callback must be a function", 2) + if cb ~= nil and type(cb) ~= "function" then + error("callback must be nil or a function", 2) end -- worker LRU cache retrieval @@ -864,7 +864,13 @@ function _M:get(key, opts, cb, ...) end -- not in shm either - -- single worker must execute the callback + + if cb == nil then + -- no L3 callback, early exit + return nil, nil, -1 + end + + -- L3 callback, single worker to run it return run_callback(self, key, namespaced_key, data, ttl, neg_ttl, went_stale, l1_serializer, resurrect_ttl, diff --git a/lib/resty/mlcache/ipc.lua b/lib/resty/mlcache/ipc.lua index da6174d32..6f00a55fc 100644 --- a/lib/resty/mlcache/ipc.lua +++ b/lib/resty/mlcache/ipc.lua @@ -20,6 +20,7 @@ local setmetatable = setmetatable local INDEX_KEY = "lua-resty-ipc:index" +local FORCIBLE_KEY = "lua-resty-ipc:forcible" local POLL_SLEEP_RATIO = 2 @@ -59,19 +60,10 @@ function _M.new(shm, debug) return nil, "no such lua_shared_dict: " .. shm end - local idx, err = dict:get(INDEX_KEY) - if err then - return nil, "failed to get index: " .. err - end - - if idx ~= nil and type(idx) ~= "number" then - return nil, "index is not a number, shm tampered with" - end - local self = { dict = dict, pid = debug and 0 or worker_pid(), - idx = idx or 0, + idx = 0, callbacks = {}, } @@ -113,11 +105,21 @@ function _M:broadcast(channel, data) return nil, "failed to increment index: " .. err end - local ok, err = self.dict:set(idx, marshalled_event) + local ok, err, forcible = self.dict:set(idx, marshalled_event) if not ok then return nil, "failed to insert event in shm: " .. err end + if forcible then + -- take note that eviction has started + -- we repeat this flagging to avoid this key from ever being + -- evicted itself + local ok, err = self.dict:set(FORCIBLE_KEY, true) + if not ok then + return nil, "failed to set forcible flag in shm: " .. err + end + end + return true end @@ -134,17 +136,17 @@ function _M:poll(timeout) error("timeout must be a number", 2) end - local idx, err = self.dict:get(INDEX_KEY) + local shm_idx, err = self.dict:get(INDEX_KEY) if err then return nil, "failed to get index: " .. err end - if idx == nil then + if shm_idx == nil then -- no events to poll yet return true end - if type(idx) ~= "number" then + if type(shm_idx) ~= "number" then return nil, "index is not a number, shm tampered with" end @@ -152,9 +154,27 @@ function _M:poll(timeout) timeout = 0.3 end + if self.idx == 0 then + local forcible, err = self.dict:get(FORCIBLE_KEY) + if err then + return nil, "failed to get forcible flag from shm: " .. 
err + end + + if forcible then + -- shm lru eviction occurred, we are likely a new worker + -- skip indexes that may have been evicted and resume current + -- polling idx + self.idx = shm_idx - 1 + end + + else + -- guard: self.idx <= shm_idx + self.idx = min(self.idx, shm_idx) + end + local elapsed = 0 - for _ = self.idx, idx - 1 do + for _ = self.idx, shm_idx - 1 do -- fetch event from shm with a retry policy in case -- we run our :get() in between another worker's -- :incr() and :set() diff --git a/lua-resty-mlcache-2.4.1-1.rockspec b/lua-resty-mlcache-2.5.0-1.rockspec similarity index 97% rename from lua-resty-mlcache-2.4.1-1.rockspec rename to lua-resty-mlcache-2.5.0-1.rockspec index 21367f217..857bf52d8 100644 --- a/lua-resty-mlcache-2.4.1-1.rockspec +++ b/lua-resty-mlcache-2.5.0-1.rockspec @@ -1,8 +1,8 @@ package = "lua-resty-mlcache" -version = "2.4.1-1" +version = "2.5.0-1" source = { url = "git://github.com/thibaultcha/lua-resty-mlcache", - tag = "2.4.1" + tag = "2.5.0" } description = { summary = "Layered caching library for OpenResty", diff --git a/t/00-ipc.t b/t/00-ipc.t index e9a03d2a9..f2e947dc4 100644 --- a/t/00-ipc.t +++ b/t/00-ipc.t @@ -57,78 +57,7 @@ no such lua_shared_dict: foo -=== TEST 2: new() picks up current idx if already set -This ensures new workers spawned during a master process' lifecycle do not -attempt to replay all events from index 0. -https://github.com/thibaultcha/lua-resty-mlcache/issues/87 ---- http_config eval -qq{ - lua_package_path "$::pwd/lib/?.lua;;"; - lua_shared_dict ipc 1m; - - init_by_lua_block { - require "resty.core" - - assert(ngx.shared.ipc:set("lua-resty-ipc:index", 42)) - } -} ---- config - location = /t { - content_by_lua_block { - local mlcache_ipc = require "resty.mlcache.ipc" - - local ipc, err = mlcache_ipc.new("ipc") - if not ipc then - error(err) - end - - ngx.say(ipc.idx) - } - } ---- request -GET /t ---- response_body -42 ---- no_error_log -[warn] -[error] -[crit] - - - -=== TEST 3: new() checks type of current idx if already set ---- http_config eval -qq{ - lua_package_path "$::pwd/lib/?.lua;;"; - lua_shared_dict ipc 1m; - - init_by_lua_block { - require "resty.core" - - assert(ngx.shared.ipc:set("lua-resty-ipc:index", "42")) - } -} ---- config - location = /t { - content_by_lua_block { - local mlcache_ipc = require "resty.mlcache.ipc" - - local ipc, err = mlcache_ipc.new("ipc") - ngx.say(err) - } - } ---- request -GET /t ---- response_body -index is not a number, shm tampered with ---- no_error_log -[warn] -[error] -[crit] - - - -=== TEST 4: broadcast() sends an event through shm +=== TEST 2: broadcast() sends an event through shm --- http_config eval qq{ $::HttpConfig @@ -162,7 +91,7 @@ received event from my_channel: hello world -=== TEST 5: broadcast() runs event callback in protected mode +=== TEST 3: broadcast() runs event callback in protected mode --- http_config eval qq{ $::HttpConfig @@ -196,7 +125,7 @@ lua entry thread aborted: runtime error -=== TEST 6: poll() catches invalid timeout arg +=== TEST 4: poll() catches invalid timeout arg --- http_config eval qq{ $::HttpConfig @@ -225,7 +154,7 @@ timeout must be a number -=== TEST 7: poll() catches up with all events +=== TEST 5: poll() catches up with all events --- http_config eval qq{ $::HttpConfig @@ -263,7 +192,62 @@ received event from my_channel: msg 3 -=== TEST 8: poll() does not execute events from self (same pid) +=== TEST 6: poll() resumes to current idx if events were previously evicted +This ensures new workers spawned during a master process' lifecycle do 
not +attempt to replay all events from index 0. +https://github.com/thibaultcha/lua-resty-mlcache/issues/87 +https://github.com/thibaultcha/lua-resty-mlcache/issues/93 +--- http_config eval +qq{ + lua_package_path "$::pwd/lib/?.lua;;"; + lua_shared_dict ipc 32k; + + init_by_lua_block { + require "resty.core" + local mlcache_ipc = require "resty.mlcache.ipc" + + ipc = assert(mlcache_ipc.new("ipc", true)) + + ipc:subscribe("my_channel", function(data) + ngx.log(ngx.NOTICE, "my_channel event: ", data) + end) + + for i = 1, 32 do + -- fill shm, simulating busy workers + -- this must trigger eviction for this test to succeed + assert(ipc:broadcast("my_channel", string.rep(".", 2^10))) + end + } +} +--- config + location = /t { + content_by_lua_block { + ngx.say("ipc.idx: ", ipc.idx) + + assert(ipc:broadcast("my_channel", "first broadcast")) + assert(ipc:broadcast("my_channel", "second broadcast")) + + -- first poll without new() to simulate new worker + assert(ipc:poll()) + + -- ipc.idx set to shm_idx-1 ("second broadcast") + ngx.say("ipc.idx: ", ipc.idx) + } + } +--- request +GET /t +--- response_body +ipc.idx: 0 +ipc.idx: 34 +--- error_log +my_channel event: second broadcast +--- no_error_log +my_channel event: first broadcast +[error] + + + +=== TEST 7: poll() does not execute events from self (same pid) --- http_config eval qq{ $::HttpConfig @@ -296,7 +280,7 @@ received event from my_channel: hello world -=== TEST 9: poll() runs all registered callbacks for a channel +=== TEST 8: poll() runs all registered callbacks for a channel --- http_config eval qq{ $::HttpConfig @@ -340,7 +324,7 @@ callback 3 from my_channel: hello world -=== TEST 10: poll() exits when no event to poll +=== TEST 9: poll() exits when no event to poll --- http_config eval qq{ $::HttpConfig @@ -371,7 +355,7 @@ callback from my_channel: hello world -=== TEST 11: poll() runs all callbacks from all channels +=== TEST 10: poll() runs all callbacks from all channels --- http_config eval qq{ $::HttpConfig @@ -424,7 +408,7 @@ callback 2 from other_channel: hello ipc 2 -=== TEST 12: poll() catches tampered shm (by third-party users) +=== TEST 11: poll() catches tampered shm (by third-party users) --- http_config eval qq{ $::HttpConfig @@ -457,7 +441,7 @@ index is not a number, shm tampered with -=== TEST 13: poll() retries getting an event until timeout +=== TEST 12: poll() retries getting an event until timeout --- http_config eval qq{ $::HttpConfig @@ -502,7 +486,7 @@ GET /t -=== TEST 14: poll() reaches custom timeout +=== TEST 13: poll() reaches custom timeout --- http_config eval qq{ $::HttpConfig @@ -542,7 +526,7 @@ GET /t -=== TEST 15: poll() logs errors and continue if event has been tampered with +=== TEST 14: poll() logs errors and continue if event has been tampered with --- http_config eval qq{ $::HttpConfig @@ -580,7 +564,7 @@ GET /t -=== TEST 16: poll() is safe to be called in contexts that don't support ngx.sleep() +=== TEST 15: poll() is safe to be called in contexts that don't support ngx.sleep() --- http_config eval qq{ $::HttpConfig @@ -624,6 +608,52 @@ GET /t +=== TEST 16: poll() guards self.idx from growing beyond the current shm idx +--- http_config eval +qq{ + $::HttpConfig + + init_worker_by_lua_block { + local mlcache_ipc = require "resty.mlcache.ipc" + + ipc = assert(mlcache_ipc.new("ipc", true)) + + ipc:subscribe("my_channel", function(data) + ngx.log(ngx.NOTICE, "callback from my_channel: ", data) + end) + } +} +--- config + location = /t { + content_by_lua_block { + 
assert(ipc:broadcast("other_channel", "")) + assert(ipc:poll()) + assert(ipc:broadcast("my_channel", "fist broadcast")) + assert(ipc:broadcast("other_channel", "")) + assert(ipc:broadcast("my_channel", "second broadcast")) + + -- shm idx is 5, let's mess with the instance's idx + ipc.idx = 10 + assert(ipc:poll()) + + -- we may have skipped the above events, but we are able to resume polling + assert(ipc:broadcast("other_channel", "")) + assert(ipc:broadcast("my_channel", "third broadcast")) + assert(ipc:poll()) + } + } +--- request +GET /t +--- ignore_response_body +--- error_log +callback from my_channel: third broadcast +--- no_error_log +callback from my_channel: first broadcast +callback from my_channel: second broadcast +[error] + + + === TEST 17: poll() JITs --- http_config eval qq{ diff --git a/t/02-get.t b/t/02-get.t index 17bf14093..40de0efc3 100644 --- a/t/02-get.t +++ b/t/02-get.t @@ -72,7 +72,7 @@ key must be a string -=== TEST 2: get() validates callback +=== TEST 2: get() accepts callback as nil or function --- http_config eval: $::HttpConfig --- config location = /t { @@ -85,7 +85,12 @@ key must be a string return end - local ok, err = pcall(cache.get, cache, "key") + local ok, err = pcall(cache.get, cache, "key", nil, nil) + if not ok then + ngx.say(err) + end + + local ok, err = pcall(cache.get, cache, "key", nil, function() end) if not ok then ngx.say(err) end @@ -94,14 +99,13 @@ key must be a string --- request GET /t --- response_body -callback must be a function + --- no_error_log [error] -=== TEST 3: get() validates opts ---- SKIP: no options yet +=== TEST 3: get() rejects callbacks not nil or function --- http_config eval: $::HttpConfig --- config location = /t { @@ -114,7 +118,41 @@ callback must be a function return end - local ok, err = pcall(cache.get, cache, "key", function() end, 0) + local ok, err = pcall(cache.get, cache, "key", nil, "not a function") + if not ok then + ngx.say(err) + end + + local ok, err = pcall(cache.get, cache, "key", nil, false) + if not ok then + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +callback must be nil or a function +callback must be nil or a function +--- no_error_log +[error] + + + +=== TEST 4: get() validates opts +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local mlcache = require "resty.mlcache" + + local cache, err = mlcache.new("my_mlcache", "cache_shm") + if not cache then + ngx.log(ngx.ERR, err) + return + end + + local ok, err = pcall(cache.get, cache, "key", "opts") if not ok then ngx.say(err) end @@ -129,7 +167,7 @@ opts must be a table -=== TEST 4: get() calls callback in protected mode with stack traceback +=== TEST 5: get() calls callback in protected mode with stack traceback --- http_config eval: $::HttpConfig --- config location = /t { @@ -164,7 +202,7 @@ stack traceback: -=== TEST 5: get() is resilient to callback runtime errors with non-string arguments +=== TEST 6: get() is resilient to callback runtime errors with non-string arguments --- http_config eval: $::HttpConfig --- config location = /t { @@ -198,7 +236,7 @@ callback threw an error: table: 0x[0-9a-fA-F]+ -=== TEST 6: get() caches a number +=== TEST 7: get() caches a number --- http_config eval: $::HttpConfig --- config location = /t { @@ -259,7 +297,7 @@ from shm: number 123 -=== TEST 7: get() caches a boolean (true) +=== TEST 8: get() caches a boolean (true) --- http_config eval: $::HttpConfig --- config location = /t { @@ -320,7 +358,7 @@ from shm: boolean true -=== TEST 8: 
get() caches a boolean (false) +=== TEST 9: get() caches a boolean (false) --- http_config eval: $::HttpConfig --- config location = /t { @@ -381,7 +419,7 @@ from shm: boolean false -=== TEST 9: get() caches nil +=== TEST 10: get() caches nil --- http_config eval: $::HttpConfig --- config location = /t { @@ -442,7 +480,7 @@ from shm: nil nil -=== TEST 10: get() caches nil in 'shm_miss' if specified +=== TEST 11: get() caches nil in 'shm_miss' if specified --- http_config eval: $::HttpConfig --- config location = /t { @@ -523,7 +561,7 @@ value in lru is a sentinel nil value: true -=== TEST 11: get() caches a string +=== TEST 12: get() caches a string --- http_config eval: $::HttpConfig --- config location = /t { @@ -584,7 +622,7 @@ from shm: string hello world -=== TEST 12: get() caches a table +=== TEST 13: get() caches a table --- http_config eval: $::HttpConfig --- config location = /t { @@ -649,7 +687,7 @@ from shm: table world bar -=== TEST 13: get() errors when caching an unsupported type +=== TEST 14: get() errors when caching an unsupported type --- http_config eval: $::HttpConfig --- config location = /t { @@ -682,7 +720,7 @@ qr/\[error\] .*?mlcache\.lua:\d+: cannot cache value of type userdata/ -=== TEST 14: get() calls callback with args +=== TEST 15: get() calls callback with args --- http_config eval: $::HttpConfig --- config location = /t { @@ -717,7 +755,7 @@ GET /t -=== TEST 15: get() caches hit for 'ttl' from LRU (in ms) +=== TEST 16: get() caches hit for 'ttl' from LRU (in ms) --- http_config eval: $::HttpConfig --- config location = /t { @@ -755,7 +793,7 @@ in callback -=== TEST 16: get() caches miss (nil) for 'neg_ttl' from LRU (in ms) +=== TEST 17: get() caches miss (nil) for 'neg_ttl' from LRU (in ms) --- http_config eval: $::HttpConfig --- config location = /t { @@ -799,7 +837,7 @@ in callback -=== TEST 17: get() caches for 'opts.ttl' from LRU (in ms) +=== TEST 18: get() caches for 'opts.ttl' from LRU (in ms) --- http_config eval: $::HttpConfig --- config location = /t { @@ -837,7 +875,7 @@ in callback -=== TEST 18: get() caches for 'opts.neg_ttl' from LRU (in ms) +=== TEST 19: get() caches for 'opts.neg_ttl' from LRU (in ms) --- http_config eval: $::HttpConfig --- config location = /t { @@ -878,7 +916,7 @@ in callback -=== TEST 19: get() with ttl of 0 means indefinite caching +=== TEST 20: get() with ttl of 0 means indefinite caching --- http_config eval: $::HttpConfig --- config location = /t { @@ -925,7 +963,7 @@ in shm after exp: 123 -=== TEST 20: get() with neg_ttl of 0 means indefinite caching for nil values +=== TEST 21: get() with neg_ttl of 0 means indefinite caching for nil values --- http_config eval: $::HttpConfig --- config location = /t { @@ -974,7 +1012,7 @@ in shm after exp: nil -=== TEST 21: get() errors when ttl < 0 +=== TEST 22: get() errors when ttl < 0 --- http_config eval: $::HttpConfig --- config location = /t { @@ -1007,7 +1045,7 @@ opts.ttl must be >= 0 -=== TEST 22: get() errors when neg_ttl < 0 +=== TEST 23: get() errors when neg_ttl < 0 --- http_config eval: $::HttpConfig --- config location = /t { @@ -1040,7 +1078,7 @@ opts.neg_ttl must be >= 0 -=== TEST 23: get() shm -> LRU caches for 'opts.ttl - since' in ms +=== TEST 24: get() shm -> LRU caches for 'opts.ttl - since' in ms --- http_config eval: $::HttpConfig --- config location = /t { @@ -1098,7 +1136,7 @@ is stale in LRU: 123 -=== TEST 24: get() shm -> LRU caches non-nil for 'indefinite' if ttl is 0 +=== TEST 25: get() shm -> LRU caches non-nil for 'indefinite' if ttl is 0 --- 
http_config eval: $::HttpConfig --- config location = /t { @@ -1142,7 +1180,7 @@ is not expired in LRU: 123 -=== TEST 25: get() shm -> LRU caches for 'opts.neg_ttl - since' in ms +=== TEST 26: get() shm -> LRU caches for 'opts.neg_ttl - since' in ms --- http_config eval: $::HttpConfig --- config location = /t { @@ -1202,7 +1240,7 @@ is stale in LRU: table: \S+ -=== TEST 26: get() shm -> LRU caches nil for 'indefinite' if neg_ttl is 0 +=== TEST 27: get() shm -> LRU caches nil for 'indefinite' if neg_ttl is 0 --- http_config eval: $::HttpConfig --- config location = /t { @@ -1245,7 +1283,7 @@ is stale in LRU: nil -=== TEST 27: get() returns hit level +=== TEST 28: get() returns hit level --- http_config eval: $::HttpConfig --- config location = /t { @@ -1283,7 +1321,7 @@ hit level from shm: 2 -=== TEST 28: get() returns hit level for nil hits +=== TEST 29: get() returns hit level for nil hits --- http_config eval: $::HttpConfig --- config location = /t { @@ -1321,7 +1359,7 @@ hit level from shm: 2 -=== TEST 29: get() returns hit level for boolean false hits +=== TEST 30: get() returns hit level for boolean false hits --- skip_eval: 3: t::Util::skip_openresty('<', '1.11.2.3') --- http_config eval: $::HttpConfig --- config @@ -1360,7 +1398,7 @@ hit level from shm: 2 -=== TEST 30: get() JITs when hit coming from LRU +=== TEST 31: get() JITs when hit coming from LRU --- http_config eval: $::HttpConfig --- config location = /t { @@ -1390,7 +1428,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):10 loop\]/ -=== TEST 31: get() JITs when hit of scalar value coming from shm +=== TEST 32: get() JITs when hit of scalar value coming from shm --- http_config eval: $::HttpConfig --- config location = /t { @@ -1454,8 +1492,8 @@ GET /t -=== TEST 32: get() JITs when hit of table value coming from shm ---- SKIP: blocked until custom table serializer +=== TEST 33: get() JITs when hit of table value coming from shm +--- SKIP: blocked until l2_serializer --- http_config eval: $::HttpConfig --- config location = /t { @@ -1488,7 +1526,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):18 loop\]/ -=== TEST 33: get() JITs when miss coming from LRU +=== TEST 34: get() JITs when miss coming from LRU --- http_config eval: $::HttpConfig --- config location = /t { @@ -1519,7 +1557,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):10 loop\]/ -=== TEST 34: get() JITs when miss coming from shm +=== TEST 35: get() JITs when miss coming from shm --- http_config eval: $::HttpConfig --- config location = /t { @@ -1552,7 +1590,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):10 loop\]/ -=== TEST 35: get() callback can return nil + err (string) +=== TEST 36: get() callback can return nil + err (string) --- http_config eval: $::HttpConfig --- config location = /t { @@ -1592,7 +1630,7 @@ cb2 return values: foo an error occurred again -=== TEST 36: get() callback can return nil + err (non-string) safely +=== TEST 37: get() callback can return nil + err (non-string) safely --- http_config eval: $::HttpConfig --- config location = /t { @@ -1631,7 +1669,7 @@ cb2 return values: foo table: 0x[[:xdigit:]]+ -=== TEST 37: get() callback can return nil + err (table) and will call __tostring +=== TEST 38: get() callback can return nil + err (table) and will call __tostring --- http_config eval: $::HttpConfig --- config location = /t { @@ -1664,7 +1702,7 @@ cb return values: nil hello from __tostring -=== TEST 38: get() callback's 3th return value can override the ttl +=== TEST 39: get() callback's 3th return value can 
override the ttl --- http_config eval: $::HttpConfig --- config location = /t { @@ -1715,7 +1753,7 @@ in callback 2 -=== TEST 39: get() callback's 3th return value can override the neg_ttl +=== TEST 40: get() callback's 3th return value can override the neg_ttl --- http_config eval: $::HttpConfig --- config location = /t { @@ -1766,7 +1804,7 @@ in callback 2 -=== TEST 40: get() ignores invalid callback 3rd return value (not number) +=== TEST 41: get() ignores invalid callback 3rd return value (not number) --- http_config eval: $::HttpConfig --- config location = /t { @@ -1845,7 +1883,7 @@ in positive callback -=== TEST 41: get() passes 'resty_lock_opts' for L3 calls +=== TEST 42: get() passes 'resty_lock_opts' for L3 calls --- http_config eval: $::HttpConfig --- config location = /t { @@ -1888,7 +1926,7 @@ was given 'opts.resty_lock_opts': true -=== TEST 42: get() errors on lock timeout +=== TEST 43: get() errors on lock timeout --- http_config eval: $::HttpConfig --- config location = /t { @@ -1968,7 +2006,7 @@ hit_lvl: 1 -=== TEST 43: get() returns data even if failed to set in shm +=== TEST 44: get() returns data even if failed to set in shm --- http_config eval: $::HttpConfig --- config location = /t { @@ -2020,7 +2058,7 @@ qr/\[warn\] .*? could not write to lua_shared_dict 'cache_shm' after 3 tries \(n -=== TEST 44: get() errors on invalid opts.shm_set_tries +=== TEST 45: get() errors on invalid opts.shm_set_tries --- http_config eval: $::HttpConfig --- config location = /t { @@ -2060,7 +2098,7 @@ opts.shm_set_tries must be >= 1 -=== TEST 45: get() with default shm_set_tries to LRU evict items when a large value is being cached +=== TEST 46: get() with default shm_set_tries to LRU evict items when a large value is being cached --- http_config eval: $::HttpConfig --- config location = /t { @@ -2131,7 +2169,7 @@ callback was called: 1 times -=== TEST 46: get() respects instance opts.shm_set_tries to LRU evict items when a large value is being cached +=== TEST 47: get() respects instance opts.shm_set_tries to LRU evict items when a large value is being cached --- http_config eval: $::HttpConfig --- config location = /t { @@ -2204,7 +2242,7 @@ callback was called: 1 times -=== TEST 47: get() accepts opts.shm_set_tries to LRU evict items when a large value is being cached +=== TEST 48: get() accepts opts.shm_set_tries to LRU evict items when a large value is being cached --- http_config eval: $::HttpConfig --- config location = /t { @@ -2277,7 +2315,7 @@ callback was called: 1 times -=== TEST 48: get() caches data in L1 LRU even if failed to set in shm +=== TEST 49: get() caches data in L1 LRU even if failed to set in shm --- http_config eval: $::HttpConfig --- config location = /t { @@ -2341,7 +2379,7 @@ is stale: true -=== TEST 49: get() does not cache value in LRU indefinitely when retrieved from shm on last ms (see GH PR #58) +=== TEST 50: get() does not cache value in LRU indefinitely when retrieved from shm on last ms (see GH PR #58) --- http_config eval: $::HttpConfig --- config location = /t { @@ -2406,7 +2444,7 @@ GET /t -=== TEST 50: get() bypass cache for negative callback TTL +=== TEST 51: get() bypass cache for negative callback TTL --- http_config eval: $::HttpConfig --- config location = /t { @@ -2470,3 +2508,195 @@ in negative callback in negative callback --- no_error_log [error] + + + +=== TEST 52: get() nil callback returns positive cached items from L1/L2 +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local mlcache = 
require "resty.mlcache" + + local cache = assert(mlcache.new("my_mlcache", "cache_shm")) + + -- miss lookup + + local data, err, hit_lvl = cache:get("key") + if err then + ngx.log(ngx.ERR, err) + end + ngx.say("-> miss") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- cache an item + + local _, err = cache:get("key", nil, function() return 123 end) + if err then + ngx.log(ngx.ERR, err) + end + + -- hit from lru + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> from LRU") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- hit from shm + + cache.lru:delete("key") + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> from shm") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- promoted to lru again + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> promoted to LRU") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + } + } +--- request +GET /t +--- response_body +-> miss +data: nil +err: nil +hit_lvl: -1 + +-> from LRU +data: 123 +err: nil +hit_lvl: 1 + +-> from shm +data: 123 +err: nil +hit_lvl: 2 + +-> promoted to LRU +data: 123 +err: nil +hit_lvl: 1 +--- no_error_log +[error] + + + +=== TEST 53: get() nil callback returns negative cached items from L1/L2 +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local mlcache = require "resty.mlcache" + + local cache = assert(mlcache.new("my_mlcache", "cache_shm")) + + -- miss lookup + + local data, err, hit_lvl = cache:get("key") + if err then + ngx.log(ngx.ERR, err) + end + ngx.say("-> miss") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- cache an item + + local _, err = cache:get("key", nil, function() return nil end) + if err then + ngx.log(ngx.ERR, err) + end + + -- hit from lru + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> from LRU") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- hit from shm + + cache.lru:delete("key") + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> from shm") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + + -- promoted to lru again + + local data, err, hit_lvl = cache:get("key") + ngx.say() + ngx.say("-> promoted to LRU") + ngx.say("data: ", data) + ngx.say("err: ", err) + ngx.say("hit_lvl: ", hit_lvl) + } + } +--- request +GET /t +--- response_body +-> miss +data: nil +err: nil +hit_lvl: -1 + +-> from LRU +data: nil +err: nil +hit_lvl: 1 + +-> from shm +data: nil +err: nil +hit_lvl: 2 + +-> promoted to LRU +data: nil +err: nil +hit_lvl: 1 +--- no_error_log +[error] + + + +=== TEST 54: get() JITs on misses without a callback +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local mlcache = require "resty.mlcache" + + local cache = assert(mlcache.new("my_mlcache", "cache_shm")) + + for i = 1, 10e3 do + cache:get("key") + end + } + } +--- request +GET /t +--- ignore_response_body +--- error_log eval +qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):6 loop\]/ +--- no_error_log +[error] diff --git a/t/03-peek.t b/t/03-peek.t index fdeb96c1d..bee4b9da0 100644 --- a/t/03-peek.t +++ b/t/03-peek.t @@ -151,7 +151,47 @@ ttl: 18 -=== TEST 4: peek() returns remaining ttl if shm_miss is specified +=== TEST 4: peek() returns a negative ttl when a key expired +--- 
http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local mlcache = require "resty.mlcache" + + local cache = assert(mlcache.new("my_mlcache", "cache_shm")) + + local function cb() + return nil + end + + local val, err = cache:get("my_key", { neg_ttl = 0 }, cb) + if err then + ngx.log(ngx.ERR, err) + return + end + + ngx.sleep(1) + + local ttl = assert(cache:peek("my_key")) + ngx.say("ttl: ", math.ceil(ttl)) + + ngx.sleep(1) + + local ttl = assert(cache:peek("my_key")) + ngx.say("ttl: ", math.ceil(ttl)) + } + } +--- request +GET /t +--- response_body +ttl: -1 +ttl: -2 +--- no_error_log +[error] + + + +=== TEST 5: peek() returns remaining ttl if shm_miss is specified --- http_config eval: $::HttpConfig --- config location = /t { @@ -201,7 +241,7 @@ ttl: 18 -=== TEST 5: peek() returns the value if a key has been fetched before +=== TEST 6: peek() returns the value if a key has been fetched before --- http_config eval: $::HttpConfig --- config location = /t { @@ -261,7 +301,7 @@ ttl: \d* nil_val: nil -=== TEST 6: peek() returns the value if shm_miss is specified +=== TEST 7: peek() returns the value if shm_miss is specified --- http_config eval: $::HttpConfig --- config location = /t { @@ -300,7 +340,7 @@ ttl: \d* nil_val: nil -=== TEST 7: peek() JITs on hit +=== TEST 8: peek() JITs on hit --- http_config eval: $::HttpConfig --- config location = /t { @@ -332,7 +372,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):13 loop\]/ -=== TEST 8: peek() JITs on miss +=== TEST 9: peek() JITs on miss --- http_config eval: $::HttpConfig --- config location = /t { @@ -360,7 +400,7 @@ qr/\[TRACE\s+\d+ content_by_lua\(nginx\.conf:\d+\):6 loop\]/ -=== TEST 9: peek() returns nil if a value expired +=== TEST 10: peek() returns nil if a value expired --- http_config eval: $::HttpConfig --- config location = /t { @@ -401,7 +441,7 @@ stale: nil -=== TEST 10: peek() returns nil if a value expired in 'shm_miss' +=== TEST 11: peek() returns nil if a value expired in 'shm_miss' --- http_config eval: $::HttpConfig --- config location = /t { @@ -448,7 +488,7 @@ stale: nil -=== TEST 11: peek() accepts stale arg and returns stale values +=== TEST 12: peek() accepts stale arg and returns stale values --- http_config eval: $::HttpConfig --- config location = /t { @@ -489,7 +529,7 @@ stale: true -=== TEST 12: peek() accepts stale arg and returns stale values from 'shm_miss' +=== TEST 13: peek() accepts stale arg and returns stale values from 'shm_miss' --- http_config eval: $::HttpConfig --- config location = /t { @@ -536,7 +576,7 @@ stale: true -=== TEST 13: peek() does not evict stale items from L2 shm +=== TEST 14: peek() does not evict stale items from L2 shm --- http_config eval: $::HttpConfig --- config location = /t { @@ -581,7 +621,7 @@ data: 123 -=== TEST 14: peek() does not evict stale negative data from L2 shm_miss +=== TEST 15: peek() does not evict stale negative data from L2 shm_miss --- http_config eval: $::HttpConfig --- config location = /t { diff --git a/t/09-isolation.t b/t/09-isolation.t index 9e88d5b18..0ce0b40b9 100644 --- a/t/09-isolation.t +++ b/t/09-isolation.t @@ -87,7 +87,7 @@ lua-resty-lru instances are the same: false -- prove LRU survived - ngx.say(cache_2.lru:get("key")) + ngx.say((cache_2.lru:get("key"))) -- GC cache_2 (and the LRU this time, since no more references) @@ -101,7 +101,7 @@ lua-resty-lru instances are the same: false -- this is a new LRU, it has nothing in it - ngx.say(cache_2.lru:get("key")) + ngx.say((cache_2.lru:get("key"))) } } --- 
request diff --git a/t/13-get_bulk.t b/t/13-get_bulk.t index 428ff1564..7b6498a91 100644 --- a/t/13-get_bulk.t +++ b/t/13-get_bulk.t @@ -128,6 +128,15 @@ key at index 5 must be a string for operation 2 (got boolean) local mlcache = require "resty.mlcache" local cache = assert(mlcache.new("my_mlcache", "cache_shm")) + local pok, perr = pcall(cache.get_bulk, cache, { + "key_b", nil, nil, nil, + "key_a", nil, function() return 1 end, nil, + n = 2, + }) + if not pok then + ngx.say(perr) + end + local pok, perr = pcall(cache.get_bulk, cache, { "key_a", nil, function() return 1 end, nil, "key_b", nil, false, nil, @@ -141,6 +150,7 @@ key at index 5 must be a string for operation 2 (got boolean) --- request GET /t --- response_body +callback at index 3 must be a function for operation 1 (got nil) callback at index 7 must be a function for operation 2 (got boolean) --- no_error_log [error] diff --git a/t/Util.pm b/t/Util.pm index d68748a5e..f8a14f3b6 100644 --- a/t/Util.pm +++ b/t/Util.pm @@ -1,3 +1,4 @@ +use strict; package t::Util; sub get_openresty_canon_version (@) { @@ -46,3 +47,5 @@ our @EXPORT = qw( ); 1; + +# vim: set ft=perl: