diff --git a/.credo.exs b/.credo.exs index 6537d951..db0f4c3c 100644 --- a/.credo.exs +++ b/.credo.exs @@ -117,9 +117,9 @@ ## Refactoring Opportunities # {Credo.Check.Refactor.CondStatements, []}, - {Credo.Check.Refactor.CyclomaticComplexity, []}, + {Credo.Check.Refactor.CyclomaticComplexity, [max_complexity: 40]}, {Credo.Check.Refactor.FunctionArity, []}, - {Credo.Check.Refactor.LongQuoteBlocks, [max_line_count: 300, ignore_comments: true]}, + {Credo.Check.Refactor.LongQuoteBlocks, [max_line_count: 200]}, # {Credo.Check.Refactor.MapInto, []}, {Credo.Check.Refactor.MatchInCondition, []}, {Credo.Check.Refactor.NegatedConditionsInUnless, []}, diff --git a/.dialyzer_ignore.exs b/.dialyzer_ignore.exs deleted file mode 100644 index 34636911..00000000 --- a/.dialyzer_ignore.exs +++ /dev/null @@ -1,6 +0,0 @@ -[ - ~r/Function :persistent_term.get\/1\ does\ not\ exist\./, - ~r/Function :persistent_term.get\/2\ does\ not\ exist\./, - ~r/Function :persistent_term.put\/2\ does\ not\ exist\./, - ~r/Function :persistent_term.erase\/1\ does\ not\ exist\./ -] diff --git a/.doctor.exs b/.doctor.exs new file mode 100644 index 00000000..81f3871a --- /dev/null +++ b/.doctor.exs @@ -0,0 +1,20 @@ +%Doctor.Config{ + ignore_modules: [ + Nebulex.Cache.Impl, + Nebulex.Cache.Options, + Nebulex.Caching.Options, + Nebulex.Dialyzer.CachingDecorators + ], + ignore_paths: [], + min_module_doc_coverage: 40, + min_module_spec_coverage: 0, + min_overall_doc_coverage: 80, + min_overall_moduledoc_coverage: 100, + min_overall_spec_coverage: 0, + exception_moduledoc_required: true, + raise: false, + reporter: Doctor.Reporters.Full, + struct_type_spec_required: true, + umbrella: false, + failed: false +} diff --git a/.formatter.exs b/.formatter.exs index bdf866e8..e2ac7370 100644 --- a/.formatter.exs +++ b/.formatter.exs @@ -1,5 +1,21 @@ locals_without_parens = [ + # Nebulex.Utils + unwrap_or_raise: 1, + wrap_ok: 1, + wrap_error: 1, + wrap_error: 2, + + # Nebulex.Cache.Utils + defcacheapi: 2, + + # 
Nebulex.Adapter + defcommand: 1, + defcommand: 2, + defcommandp: 1, + defcommandp: 2, + # Nebulex.Caching + dynamic_cache: 2, keyref: 1, keyref: 2, diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd8d56a0..9e7a66cd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,23 +18,26 @@ jobs: strategy: matrix: include: - - elixir: 1.14.x + - elixir: 1.15.x + otp: 26.x + os: 'ubuntu-latest' + - elixir: 1.15.x otp: 25.x os: 'ubuntu-latest' style: true coverage: true sobelow: true dialyzer: true + doctor: true + - elixir: 1.14.x + otp: 25.x + os: 'ubuntu-latest' - elixir: 1.13.x otp: 24.x os: 'ubuntu-latest' - - elixir: 1.11.x + - elixir: 1.12.x otp: 23.x os: 'ubuntu-20.04' - inch-report: true - - elixir: 1.9.x - otp: 22.x - os: 'ubuntu-20.04' env: GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' @@ -87,19 +90,15 @@ jobs: if: ${{ matrix.style }} - name: Run tests - run: | - epmd -daemon - mix test --trace + run: mix test if: ${{ !matrix.coverage }} - name: Run tests with coverage - run: | - epmd -daemon - mix coveralls.github + run: mix coveralls.github if: ${{ matrix.coverage }} - name: Run sobelow - run: mix sobelow --exit --skip + run: mix sobelow --skip --exit Low if: ${{ matrix.sobelow }} - name: Restore PLT Cache @@ -107,7 +106,7 @@ jobs: id: plt-cache with: path: priv/plts - key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-plt-v1 + key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-plt-v3-1 if: ${{ matrix.dialyzer }} - name: Create PLTs @@ -120,6 +119,6 @@ jobs: run: mix dialyzer --format github if: ${{ matrix.dialyzer && steps.plt-cache.outputs.cache-hit != 'true' }} - - name: Doc coverage report - run: MIX_ENV=docs mix inch.report - if: ${{ matrix.inch-report }} + - name: Run documentation health check + run: mix doctor + if: ${{ matrix.doctor }} diff --git a/.gitignore b/.gitignore index 076b29cc..20270cae 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ erl_crash.dump /priv .sobelow* /config 
+Elixir* diff --git a/CHANGELOG.md b/CHANGELOG.md index 4bffdcbd..c93e0a0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,138 +4,6 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [v2.5.2](https://github.com/cabol/nebulex/tree/v2.5.2) (2023-07-14) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.5.1...v2.5.2) - -**Closed issues:** - -- Replicated adapter syncing during rolling deployment. - [#209](https://github.com/cabol/nebulex/issues/209) -- Ambiguity regarding ttl and `gc_interval` relation. - [#208](https://github.com/cabol/nebulex/issues/208) -- Seeing Nebulex.RPCError during deployments with partitioned adapter. - [#206](https://github.com/cabol/nebulex/issues/206) -- Random `:erpc`, `:timeout` with partitioned get. - [#202](https://github.com/cabol/nebulex/issues/202) -- Processes reading from cache blocked by generational gc process. - [#197](https://github.com/cabol/nebulex/issues/197) - -**Merged pull requests:** - -- Delay flushing ets table to avoid blocking processes using it. 
- [#210](https://github.com/cabol/nebulex/pull/210) - ([szajbus](https://github.com/szajbus)) - -## [v2.5.1](https://github.com/cabol/nebulex/tree/v2.5.1) (2023-05-27) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.5.0...v2.5.1) - -**Merged pull requests:** - -- Fix `nil` check in `Nebulex.Adapters.Multilevel.get/3` - [#205](https://github.com/cabol/nebulex/pull/205) - ([1100x1100](https://github.com/1100x1100)) -- `mix nbx.gen.cache` example fixed - [#204](https://github.com/cabol/nebulex/pull/204) - ([hissssst](https://github.com/hissssst)) - -## [v2.5.0](https://github.com/cabol/nebulex/tree/v2.5.0) (2023-05-13) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.4.2...v2.5.0) - -**Implemented enhancements:** - -- Support for functions that can set TTL in Decorator similar to Match - [#200](https://github.com/cabol/nebulex/issues/200) -- Improve default match function in decorators to cover more scenarios - [#177](https://github.com/cabol/nebulex/issues/177) -- Adapters implementation guide - [#96](https://github.com/cabol/nebulex/issues/96) - -**Fixed bugs:** - -- Issue with keys set to `false` when calling `get_all` in local adapter - [#187](https://github.com/cabol/nebulex/issues/187) - -**Closed issues:** - -- Is there any way to get the size of the cache? - [#203](https://github.com/cabol/nebulex/issues/203) -- Where to use load/2, dump/2 - [#201](https://github.com/cabol/nebulex/issues/201) -- `Nebulex.Cache` callbacks mention "Shared Options" section that do not exist - [#199](https://github.com/cabol/nebulex/issues/199) -- Errors when storing nil values - [#195](https://github.com/cabol/nebulex/issues/195) -- Unregistering cache in registry happens after cache shuts down - [#194](https://github.com/cabol/nebulex/issues/194) -- Is there a good way to evict multiple caches at once by some conditions? 
- [#192](https://github.com/cabol/nebulex/issues/192) -- Unable to use module attributes when specifying a MFA cache within the decorator - [#191](https://github.com/cabol/nebulex/issues/191) -- Nebulex crash when `gc_interval` is not set - [#182](https://github.com/cabol/nebulex/issues/182) -- `ArgumentError` * 1st argument: the table identifier does not refer to an existing ETS table - [#181](https://github.com/cabol/nebulex/issues/181) -- Feedback for `NebulexLocalDistributedAdapter` - [#180](https://github.com/cabol/nebulex/issues/180) -- Multilevel invalidation - [#179](https://github.com/cabol/nebulex/issues/179) -- External cache-key references on `cacheable` decorator - [#178](https://github.com/cabol/nebulex/issues/178) -- [multiple clause functions] Cannot use ignored variables in decorator keys - [#173](https://github.com/cabol/nebulex/issues/173) -- Ability for referencing a key in the `cacheable` decorator via `:references` option - [#169](https://github.com/cabol/nebulex/issues/169) -- Multi level caching suggestion? 
- [#168](https://github.com/cabol/nebulex/issues/168) - -**Merged pull requests:** - -- Fix `Local.get_all` with false values - [#186](https://github.com/cabol/nebulex/pull/186) - ([renatoaguiar](https://github.com/renatoaguiar)) -- Add NebulexLocalMultilevelAdapter to the list - [#185](https://github.com/cabol/nebulex/pull/185) - ([martosaur](https://github.com/martosaur)) -- Fix the crash when `gc_interval` is not set - [#183](https://github.com/cabol/nebulex/pull/183) - ([dongfuye](https://github.com/dongfuye)) -- [#169] Reference a key in `cacheable` decorator via `:references` option - [#176](https://github.com/cabol/nebulex/pull/176) - ([cabol](https://github.com/cabol)) -- Creating New Adapter guide - [#175](https://github.com/cabol/nebulex/pull/175) - ([martosaur](https://github.com/martosaur)) - -## [v2.4.2](https://github.com/cabol/nebulex/tree/v2.4.2) (2022-11-04) - -[Full Changelog](https://github.com/cabol/nebulex/compare/v2.4.1...v2.4.2) - -**Closed issues:** - -- Adapter configuration per-env? - [#171](https://github.com/cabol/nebulex/issues/171) -- On-change handler for write-through decorators - [#165](https://github.com/cabol/nebulex/issues/165) -- Document test env setup with decorators? 
- [#155](https://github.com/cabol/nebulex/issues/155) -- Managing Failovers in the cluster - [#131](https://github.com/cabol/nebulex/issues/131) - -**Merged pull requests:** - -- Make Multilevel adapter apply deletes in reverse order - [#174](https://github.com/cabol/nebulex/pull/174) - ([martosaur](https://github.com/martosaur)) -- Use import Bitwise instead of use Bitwise - [#172](https://github.com/cabol/nebulex/pull/172) - ([ryvasquez](https://github.com/ryvasquez)) -- Fix result of getting value by non existent key - [#166](https://github.com/cabol/nebulex/pull/166) - ([fuelen](https://github.com/fuelen)) - ## [v2.4.1](https://github.com/cabol/nebulex/tree/v2.4.1) (2022-07-10) [Full Changelog](https://github.com/cabol/nebulex/compare/v2.4.0...v2.4.1) diff --git a/README.md b/README.md index 75ebaea5..3e393d20 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,6 @@ ![CI](https://github.com/cabol/nebulex/workflows/CI/badge.svg) [![Coverage Status](https://img.shields.io/coveralls/cabol/nebulex.svg)](https://coveralls.io/github/cabol/nebulex) -[![Inline docs](http://inch-ci.org/github/cabol/nebulex.svg)](http://inch-ci.org/github/cabol/nebulex) [![Hex Version](https://img.shields.io/hexpm/v/nebulex.svg)](https://hex.pm/packages/nebulex) [![Docs](https://img.shields.io/badge/docs-hexpm-blue.svg)](https://hexdocs.pm/nebulex) [![License](https://img.shields.io/hexpm/l/nebulex.svg)](LICENSE) @@ -17,7 +16,7 @@ underlying caching implementations, such as [Redis][redis], [Memcached][memcached], or even other Elixir cache implementations like [Cachex][cachex]. Additionally, it provides totally out-of-box features such as [cache usage patterns][cache_patterns], -[declarative annotation-based caching][nbx_caching], and +[declarative decorator-based caching][nbx_caching], and [distributed cache topologies][cache_topologies], among others. See the [getting started guide](http://hexdocs.pm/nebulex/getting-started.html) @@ -28,52 +27,50 @@ for more information. 
[cachex]: https://github.com/whitfin/cachex [redis]: https://redis.io/ [memcached]: https://memcached.org/ -[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html [cache_patterns]: http://hexdocs.pm/nebulex/cache-usage-patterns.html [cache_topologies]: https://docs.oracle.com/middleware/1221/coherence/develop-applications/cache_intro.htm ## Usage -You need to add `nebulex` as a dependency to your `mix.exs` file. However, in -the case you want to use an external (a non built-in adapter) cache adapter, -you also have to add the proper dependency to your `mix.exs` file. - -The supported caches and their adapters are: +You need to add both Nebulex and the cache adapter as a dependency to your +`mix.exs` file. The supported caches and their adapters are: Cache | Nebulex Adapter | Dependency :-----| :---------------| :--------- -Generational Local Cache | [Nebulex.Adapters.Local][la] | Built-In -Partitioned | [Nebulex.Adapters.Partitioned][pa] | Built-In -Replicated | [Nebulex.Adapters.Replicated][ra] | Built-In -Multilevel | [Nebulex.Adapters.Multilevel][ma] | Built-In Nil (special adapter that disables the cache) | [Nebulex.Adapters.Nil][nil] | Built-In -Cachex | Nebulex.Adapters.Cachex | [nebulex_adapters_cachex][nbx_cachex] +Generational Local Cache | Nebulex.Adapters.Local | [nebulex_adapters_local][la] +Partitioned | Nebulex.Adapters.Partitioned | [nebulex_adapters_partitioned][pa] +Replicated | Nebulex.Adapters.Replicated | [nebulex_adapters_replicated][ra] +Multilevel | Nebulex.Adapters.Multilevel | [nebulex_adapters_multilevel][ma] Redis | NebulexRedisAdapter | [nebulex_redis_adapter][nbx_redis] +Cachex | Nebulex.Adapters.Cachex | [nebulex_adapters_cachex][nbx_cachex] Distributed with Horde | Nebulex.Adapters.Horde | [nebulex_adapters_horde][nbx_horde] Multilevel with cluster broadcasting | NebulexLocalMultilevelAdapter | [nebulex_local_multilevel_adapter][nbx_local_multilevel] 
-Ecto Postgres table | Nebulex.Adapters.Ecto | [nebulex_adapters_ecto][nbx_ecto_postgres] -[la]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.html -[pa]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Partitioned.html -[ra]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Replicated.html -[ma]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Multilevel.html [nil]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Nil.html -[nbx_cachex]: https://github.com/cabol/nebulex_adapters_cachex +[la]: https://github.com/elixir-nebulex/nebulex_adapters_local +[pa]: https://github.com/elixir-nebulex/nebulex_adapters_partitioned +[ra]: https://github.com/elixir-nebulex/nebulex_adapters_replicated +[ma]: https://github.com/elixir-nebulex/nebulex_adapters_multilevel [nbx_redis]: https://github.com/cabol/nebulex_redis_adapter +[nbx_cachex]: https://github.com/cabol/nebulex_adapters_cachex [nbx_horde]: https://github.com/eliasdarruda/nebulex_adapters_horde [nbx_local_multilevel]: https://github.com/slab/nebulex_local_multilevel_adapter -[nbx_ecto_postgres]: https://github.com/hissssst/nebulex_adapters_ecto - -For example, if you want to use a built-in cache, add to your `mix.exs` file: +For example, if you want to use `Nebulex.Adapters.Local`, add to your `mix.exs` +file: ```elixir def deps do [ - {:nebulex, "~> 2.5"}, - {:shards, "~> 1.1"}, #=> When using :shards as backend - {:decorator, "~> 1.4"}, #=> When using Caching Annotations - {:telemetry, "~> 1.0"} #=> When using the Telemetry events (Nebulex stats) + {:nebulex, "~> 3.0"}, + #=> When using the local cache adapter + {:nebulex_adapters_local, "~> 3.0"}, + #=> When using Caching Annotations + {:decorator, "~> 1.4"}, + #=> When using the Telemetry events + {:telemetry, "~> 1.2"} ] end ``` @@ -81,21 +78,14 @@ end In order to give more flexibility and fetch only needed dependencies, Nebulex makes all dependencies optional. 
For example: - * For intensive workloads, you may want to use `:shards` as the backend for - the local adapter and having partitioned tables. In such a case, you have - to add `:shards` to the dependency list. - * For enabling the usage of - [declarative annotation-based caching via decorators][nbx_caching], + [declarative decorator-based caching via decorators][nbx_caching], you have to add `:decorator` to the dependency list. * For enabling Telemetry events to be dispatched when using Nebulex, you have to add `:telemetry` to the dependency list. See [telemetry guide][telemetry]. - * If you want to use an external adapter (e.g: Cachex or Redis adapter), you - have to add the adapter dependency too. - [telemetry]: http://hexdocs.pm/nebulex/telemetry.html Then run `mix deps.get` in your shell to fetch the dependencies. If you want to @@ -103,7 +93,7 @@ use another cache adapter, just choose the proper dependency from the table above. Finally, in the cache definition, you will need to specify the `adapter:` -respective to the chosen dependency. For the local built-in cache it is: +respective to the chosen dependency. 
For the local cache, it would be: ```elixir defmodule MyApp.Cache do @@ -113,28 +103,29 @@ end ``` -## Quickstart example +## Quickstart example using caching decorators Assuming you are using `Ecto` and you want to use declarative caching: ```elixir # In the config/config.exs file -config :my_app, MyApp.PartitionedCache, - primary: [ - gc_interval: :timer.hours(12), - backend: :shards, - partitions: 2 - ] - -# Defining a Cache with a partitioned topology -defmodule MyApp.PartitionedCache do +config :my_app, MyApp.Cache, + gc_interval: :timer.hours(12), + # Max 1M entries + max_size: 1_000_000, + # Max 2GB of memory + allocated_memory: 2_000_000_000, + gc_cleanup_min_timeout: :timer.seconds(10), + gc_cleanup_max_timeout: :timer.minutes(10) + +# Defining the cache +defmodule MyApp.Cache do use Nebulex.Cache, otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local + adapter: Nebulex.Adapters.Local end -# Some Ecto schema +# Ecto schema defmodule MyApp.Accounts.User do use Ecto.Schema @@ -153,26 +144,24 @@ end # The Accounts context defmodule MyApp.Accounts do - use Nebulex.Caching + use Nebulex.Caching, cache: MyApp.Cache alias MyApp.Accounts.User - alias MyApp.PartitionedCache, as: Cache alias MyApp.Repo @ttl :timer.hours(1) - @decorate cacheable(cache: Cache, key: {User, id}, opts: [ttl: @ttl]) + @decorate cacheable(key: {User, id}, opts: [ttl: @ttl]) def get_user!(id) do Repo.get!(User, id) end - @decorate cacheable(cache: Cache, key: {User, username}, opts: [ttl: @ttl]) + @decorate cacheable(key: {User, username}, opts: [ttl: @ttl]) def get_user_by_username(username) do Repo.get_by(User, [username: username]) end @decorate cache_put( - cache: Cache, keys: [{User, user.id}, {User, user.username}], match: &match_update/1, opts: [ttl: @ttl] @@ -183,10 +172,7 @@ defmodule MyApp.Accounts do |> Repo.update() end - @decorate cache_evict( - cache: Cache, - keys: [{User, user.id}, {User,
user.username}] - ) + @decorate cache_evict(keys: [{User, user.id}, {User, user.username}]) def delete_user(%User{} = user) do Repo.delete(user) end @@ -239,27 +225,12 @@ the directory [benchmarks](./benchmarks). To run a benchmark test you have to run: ``` -$ MIX_ENV=test mix run benchmarks/{BENCH_TEST_FILE} -``` - -Where `BENCH_TEST_FILE` can be any of: - - * `local_with_ets_bench.exs`: benchmark for the local adapter using - `:ets` backend. - * `local_with_shards_bench.exs`: benchmark for the local adapter using - `:shards` backend. - * `partitioned_bench.exs`: benchmark for the partitioned adapter. - -For example, for running the benchmark for the local adapter using `:shards` -backend: - -``` -$ MIX_ENV=test mix run benchmarks/local_with_shards_bench.exs +$ mix run benchmarks/benchmark.exs ``` -Additionally, you can also run performance tests using `:basho_bench`. -See [nebulex_bench example](https://github.com/cabol/nebulex_examples/tree/master/nebulex_bench) -for more information. +> The benchmark uses the adapter `Nebulex.Adapters.Nil`; it is more focused on +> measuring the Nebulex abstraction layer performance rather than a specific +> adapter. ## Contributing diff --git a/benchmarks/bench_helper.exs b/benchmarks/bench_helper.exs deleted file mode 100644 index 18c816bd..00000000 --- a/benchmarks/bench_helper.exs +++ /dev/null @@ -1,77 +0,0 @@ -defmodule BenchHelper do - @moduledoc """ - Benchmark commons. 
- """ - - @doc false - def benchmarks(cache) do - %{ - "get" => fn input -> - cache.get(input) - end, - "get_all" => fn input -> - cache.get_all([input, "foo", "bar"]) - end, - "put" => fn input -> - cache.put(input, input) - end, - "put_new" => fn input -> - cache.put_new(input, input) - end, - "replace" => fn input -> - cache.replace(input, input) - end, - "put_all" => fn input -> - cache.put_all([{input, input}, {"foo", "bar"}]) - end, - "delete" => fn input -> - cache.delete(input) - end, - "take" => fn input -> - cache.take(input) - end, - "has_key?" => fn input -> - cache.has_key?(input) - end, - "count_all" => fn _input -> - cache.count_all() - end, - "ttl" => fn input -> - cache.ttl(input) - end, - "expire" => fn input -> - cache.expire(input, 1) - end, - "incr" => fn _input -> - cache.incr(:counter, 1) - end, - "update" => fn input -> - cache.update(input, 1, &Kernel.+(&1, 1)) - end, - "all" => fn _input -> - cache.all() - end - } - end - - @doc false - def run(benchmarks, opts \\ []) do - Benchee.run( - benchmarks, - Keyword.merge( - [ - inputs: %{"rand" => 100_000}, - before_each: fn n -> :rand.uniform(n) end, - formatters: [ - {Benchee.Formatters.Console, comparison: false, extended_statistics: true}, - {Benchee.Formatters.HTML, extended_statistics: true, auto_open: false} - ], - print: [ - fast_warning: false - ] - ], - opts - ) - ) - end -end diff --git a/benchmarks/benchmark.exs b/benchmarks/benchmark.exs new file mode 100644 index 00000000..18fc1972 --- /dev/null +++ b/benchmarks/benchmark.exs @@ -0,0 +1,75 @@ +## Benchmarks + +_ = Application.start(:telemetry) + +defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.Adapters.Nil +end + +benchmarks = %{ + "fetch" => fn -> + Cache.fetch("foo") + end, + "get" => fn -> + Cache.get("foo") + end, + "get_all" => fn -> + Cache.get_all({:in, ["foo", "bar"]}) + end, + "put" => fn -> + Cache.put("foo", "bar") + end, + "put_new" => fn -> + Cache.put_new("foo", 
"bar") + end, + "replace" => fn -> + Cache.replace("foo", "bar") + end, + "put_all" => fn -> + Cache.put_all([{"foo", "bar"}]) + end, + "delete" => fn -> + Cache.delete("foo") + end, + "take" => fn -> + Cache.take("foo") + end, + "has_key?" => fn -> + Cache.has_key?("foo") + end, + "count_all" => fn -> + Cache.count_all() + end, + "ttl" => fn -> + Cache.ttl("foo") + end, + "expire" => fn -> + Cache.expire("foo", 1) + end, + "incr" => fn -> + Cache.incr(:counter, 1) + end, + "update" => fn -> + Cache.update(1, 1, &Kernel.+(&1, 1)) + end +} + +# Start cache +{:ok, pid} = Cache.start_link() + +Benchee.run( + benchmarks, + formatters: [ + {Benchee.Formatters.Console, comparison: false, extended_statistics: true}, + {Benchee.Formatters.HTML, extended_statistics: true, auto_open: false} + ], + print: [ + fast_warning: false + ] +) + +# Stop cache +if Process.alive?(pid), do: Supervisor.stop(pid) diff --git a/benchmarks/local_with_ets_bench.exs b/benchmarks/local_with_ets_bench.exs deleted file mode 100644 index e75b7deb..00000000 --- a/benchmarks/local_with_ets_bench.exs +++ /dev/null @@ -1,21 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local -end - -# start local cache -{:ok, local} = Cache.start_link(telemetry: false) - -Cache -|> BenchHelper.benchmarks() -|> BenchHelper.run() - -# stop cache -if Process.alive?(local), do: Supervisor.stop(local) diff --git a/benchmarks/local_with_shards_bench.exs b/benchmarks/local_with_shards_bench.exs deleted file mode 100644 index 586013d1..00000000 --- a/benchmarks/local_with_shards_bench.exs +++ /dev/null @@ -1,21 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local -end - -# start 
local cache -{:ok, local} = Cache.start_link(backend: :shards) - -Cache -|> BenchHelper.benchmarks() -|> BenchHelper.run() - -# stop cache -if Process.alive?(local), do: Supervisor.stop(local) diff --git a/benchmarks/partitioned_bench.exs b/benchmarks/partitioned_bench.exs deleted file mode 100644 index 6b1163f1..00000000 --- a/benchmarks/partitioned_bench.exs +++ /dev/null @@ -1,22 +0,0 @@ -## Benchmarks - -:ok = Application.start(:telemetry) -Code.require_file("bench_helper.exs", __DIR__) - -nodes = [:"node1@127.0.0.1", :"node2@127.0.0.1"] -Nebulex.Cluster.spawn(nodes) - -alias Nebulex.NodeCase -alias Nebulex.TestCache.Partitioned - -# start distributed caches -{:ok, dist} = Partitioned.start_link(primary: [backend: :shards]) -node_pid_list = NodeCase.start_caches(Node.list(), [{Partitioned, primary: [backend: :shards]}]) - -Partitioned -|> BenchHelper.benchmarks() -|> BenchHelper.run(parallel: 4, time: 30) - -# stop caches -if Process.alive?(dist), do: Supervisor.stop(dist) -NodeCase.stop_caches(node_pid_list) diff --git a/coveralls.json b/coveralls.json index a9713911..199e58dc 100644 --- a/coveralls.json +++ b/coveralls.json @@ -4,6 +4,8 @@ }, "skip_files": [ + "lib/nebulex/cache/options.ex", + "lib/nebulex/caching/options.ex", "test/support/*", "test/dialyzer/*" ] diff --git a/guides/cache-usage-patterns.md b/guides/cache-usage-patterns.md index ba730f5e..bbbcb34f 100644 --- a/guides/cache-usage-patterns.md +++ b/guides/cache-usage-patterns.md @@ -1,9 +1,10 @@ -# Cache Usage Patterns via Nebulex.Caching +# Cache Usage Patterns via Nebulex.Caching.Decorators There are several common access patterns when using a cache. **Nebulex** -supports most of these patterns by means of [Nebulex.Caching][nbx_caching]. +supports most of these patterns by means of +[Nebulex.Caching.Decorators][nbx_caching]. 
-[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html > Most of the following documentation about caching patterns it based on [EHCache Docs][EHCache] @@ -71,7 +72,7 @@ A disadvantage of using the cache-as-SoR pattern is: * Less directly visible code-path -But how to get all this out-of-box? This is where declarative annotation-based +But how to get all this out-of-box? This is where declarative decorator-based caching comes in. Nebulex provides a set of annotation to abstract most of the logic behind **Read-through** and **Write-through** patterns and make the implementation extremely easy. But let's go over these patterns more in detail diff --git a/guides/creating-new-adapter.md b/guides/creating-new-adapter.md index 6daca250..90c037f8 100644 --- a/guides/creating-new-adapter.md +++ b/guides/creating-new-adapter.md @@ -23,7 +23,7 @@ Now let's modify `mix.exs` so that we could fetch Nebulex repository. defmodule NebulexMemoryAdapter.MixProject do use Mix.Project - @nbx_vsn "2.5.2" + @nbx_vsn "2.4.2" @version "0.1.0" def project do @@ -108,7 +108,7 @@ end We won't be writing tests ourselves. Instead, we will use shared tests from the Nebulex parent repo. To do so, we will create a helper module in `test/shared/cache_test.exs` that will `use` test suites for behaviour we are -going to implement. The minimal set of behaviours is `Entry` and `Queryable` so +going to implement. The minimal set of behaviours is `KV` and `Queryable` so we'll go with them. 
```elixir @@ -119,7 +119,7 @@ defmodule NebulexMemoryAdapter.CacheTest do defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest + use Nebulex.Cache.KVTest use Nebulex.Cache.QueryableTest end end @@ -187,7 +187,7 @@ Another try ```console mix test == Compilation error in file test/nebulex_memory_adapter_test.exs == -** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.EntryTest is not loaded and could not be found +** (CompileError) test/nebulex_memory_adapter_test.exs:3: module Nebulex.Cache.KVTest is not loaded and could not be found (elixir 1.13.2) expanding macro: Kernel.use/1 test/nebulex_memory_adapter_test.exs:3: NebulexMemoryAdapterTest (module) expanding macro: NebulexMemoryAdapter.CacheTest.__using__/1 @@ -256,21 +256,25 @@ defmodule NebulexMemoryAdapter do @behaviour Nebulex.Adapter @behaviour Nebulex.Adapter.Queryable + import Nebulex.Utils + @impl Nebulex.Adapter defmacro __before_compile__(_env), do: :ok @impl Nebulex.Adapter def init(_opts) do child_spec = Supervisor.child_spec({Agent, fn -> %{} end}, id: {Agent, 1}) + {:ok, child_spec, %{}} end @impl Nebulex.Adapter.Queryable def execute(adapter_meta, :delete_all, query, opts) do deleted = Agent.get(adapter_meta.pid, &map_size/1) + Agent.update(adapter_meta.pid, fn _state -> %{} end) - deleted + wrap_ok deleted end end ``` @@ -301,29 +305,32 @@ one-by-one or define them all in bulk. 
For posterity, we put a complete ```elixir defmodule NebulexMemoryAdapter do @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable + import Nebulex.Utils + @impl Nebulex.Adapter defmacro __before_compile__(_env), do: :ok @impl Nebulex.Adapter def init(_opts) do child_spec = Supervisor.child_spec({Agent, fn -> %{} end}, id: {Agent, 1}) + {:ok, child_spec, %{}} end - @impl Nebulex.Adapter.Entry - def get(adapter_meta, key, _opts) do - Agent.get(adapter_meta.pid, &Map.get(&1, key)) + @impl Nebulex.Adapter.KV + def fetch(adapter_meta, key, _opts) do + wrap_ok Agent.get(adapter_meta.pid, &Map.get(&1, key)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def get_all(adapter_meta, keys, _opts) do - Agent.get(adapter_meta.pid, &Map.take(&1, keys)) + wrap_ok Agent.get(adapter_meta.pid, &Map.take(&1, keys)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def put(adapter_meta, key, value, ttl, :put_new, opts) do if get(adapter_meta, key, []) do false @@ -331,98 +338,109 @@ defmodule NebulexMemoryAdapter do put(adapter_meta, key, value, ttl, :put, opts) true end + |> wrap_ok() end def put(adapter_meta, key, value, ttl, :replace, opts) do if get(adapter_meta, key, []) do put(adapter_meta, key, value, ttl, :put, opts) + true else false end + |> wrap_ok() end def put(adapter_meta, key, value, _ttl, _on_write, _opts) do Agent.update(adapter_meta.pid, &Map.put(&1, key, value)) - true + + wrap_ok true end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def put_all(adapter_meta, entries, ttl, :put_new, opts) do if get_all(adapter_meta, Map.keys(entries), []) == %{} do put_all(adapter_meta, entries, ttl, :put, opts) + true else false end + |> wrap_ok() end def put_all(adapter_meta, entries, _ttl, _on_write, _opts) do entries = Map.new(entries) + Agent.update(adapter_meta.pid, &Map.merge(&1, entries)) - true + + wrap_ok true end - @impl Nebulex.Adapter.Entry + @impl 
Nebulex.Adapter.KV def delete(adapter_meta, key, _opts) do - Agent.update(adapter_meta.pid, &Map.delete(&1, key)) + wrap_ok Agent.update(adapter_meta.pid, &Map.delete(&1, key)) end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def take(adapter_meta, key, _opts) do value = get(adapter_meta, key, []) + delete(adapter_meta, key, []) - value + + wrap_ok value end - @impl Nebulex.Adapter.Entry + @impl Nebulex.Adapter.KV def update_counter(adapter_meta, key, amount, _ttl, default, _opts) do Agent.update(adapter_meta.pid, fn state -> Map.update(state, key, default + amount, fn v -> v + amount end) end) - get(adapter_meta, key, []) + wrap_ok get(adapter_meta, key, []) end - @impl Nebulex.Adapter.Entry - def has_key?(adapter_meta, key) do - Agent.get(adapter_meta.pid, &Map.has_key?(&1, key)) + @impl Nebulex.Adapter.KV + def has_key?(adapter_meta, key, _opts) do + wrap_ok Agent.get(adapter_meta.pid, &Map.has_key?(&1, key)) end - @impl Nebulex.Adapter.Entry - def ttl(_adapter_meta, _key) do - nil + @impl Nebulex.Adapter.KV + def ttl(_adapter_meta, _key, _opts) do + wrap_ok nil end - @impl Nebulex.Adapter.Entry - def expire(_adapter_meta, _key, _ttl) do - true + @impl Nebulex.Adapter.KV + def expire(_adapter_meta, _key, _ttl, _opts) do + wrap_ok true end - @impl Nebulex.Adapter.Entry - def touch(_adapter_meta, _key) do - true + @impl Nebulex.Adapter.KV + def touch(_adapter_meta, _key, _opts) do + wrap_ok true end @impl Nebulex.Adapter.Queryable def execute(adapter_meta, :delete_all, _query, _opts) do deleted = execute(adapter_meta, :count_all, nil, []) + Agent.update(adapter_meta.pid, fn _state -> %{} end) - deleted + wrap_ok deleted end def execute(adapter_meta, :count_all, _query, _opts) do - Agent.get(adapter_meta.pid, &map_size/1) + wrap_ok Agent.get(adapter_meta.pid, &map_size/1) end def execute(adapter_meta, :all, _query, _opts) do - Agent.get(adapter_meta.pid, &Map.values/1) + wrap_ok Agent.get(adapter_meta.pid, &Map.values/1) end @impl 
Nebulex.Adapter.Queryable - def stream(_adapter_meta, :invalid_query, _opts) do - raise Nebulex.QueryError, message: "foo", query: :invalid_query + def stream(%{name: name}, :invalid_query, _opts) do + wrap_error Nebulex.Error, reason: :invalid_query, query: :invalid_query, cache: name end def stream(adapter_meta, _query, opts) do @@ -438,7 +456,7 @@ defmodule NebulexMemoryAdapter do &Map.keys/1 end - Agent.get(adapter_meta.pid, fun) + wrap_ok Agent.get(adapter_meta.pid, fun) end end ``` diff --git a/guides/getting-started.md b/guides/getting-started.md index 0f4f47c1..ec6e9eee 100644 --- a/guides/getting-started.md +++ b/guides/getting-started.md @@ -29,10 +29,10 @@ changing the `deps` definition in that file to this: ```elixir defp deps do [ - {:nebulex, "~> 2.5"}, + {:nebulex, "~> 2.4"}, {:shards, "~> 1.0"}, #=> When using :shards as backend {:decorator, "~> 1.4"}, #=> When using Caching Annotations - {:telemetry, "~> 1.0"} #=> When using the Telemetry events (Nebulex stats) + {:telemetry, "~> 1.0"} #=> When using the Telemetry events ] end ``` @@ -45,7 +45,7 @@ makes all its dependencies as optional. For example: to add `:shards` to the dependency list. * For enabling the usage of - [declarative annotation-based caching via decorators][nbx_caching], + [declarative decorator-based caching via decorators][nbx_caching], you have to add `:decorator` to the dependency list. * For enabling Telemetry events to be dispatched when using Nebulex, @@ -55,7 +55,7 @@ makes all its dependencies as optional. For example: * If you want to use an external adapter (e.g: Cachex or Redis adapter), you have to add the adapter dependency too. 
-[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.html +[nbx_caching]: http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html [telemetry]: http://hexdocs.pm/nebulex/telemetry.html To install these dependencies, we will run this command: @@ -267,13 +267,6 @@ iex> Blog.Cache.has_key?(10) false ``` -Retrieving multiple entries - -```elixir -iex> Blog.Cache.get_all([1, 2, 3]) -_users -``` - ## Updating entries Nebulex provides `update` and `get_and_update` functions to update an @@ -363,23 +356,27 @@ cache matching the given query. ```elixir # by default, returns all keys -iex> Blog.Cache.all() +iex> Blog.Cache.get_all() #=> The query is set to nil by default _all_entries # fetch all entries and return the keys -iex> Blog.Cache.all(nil, return: :key) +iex> Blog.Cache.get_all(nil, return: :keys) _keys +# fetch entries associated to the requested keys +iex> Blog.Cache.get_all({:in, ["k1", "k2"]}) +_fetched_entries + # built-in queries in `Nebulex.Adapters.Local` adapter -iex> Blog.Cache.all(nil) -iex> Blog.Cache.all(:unexpired) -iex> Blog.Cache.all(:expired) +iex> Blog.Cache.get_all(nil) #=> Equivalent to Blog.Cache.get_all() +iex> Blog.Cache.get_all(:unexpired) +iex> Blog.Cache.get_all(:expired) # if we are using `Nebulex.Adapters.Local` adapter, the stored entry # is a tuple `{:entry, key, value, touched, ttl}`, then the match spec # could be something like: iex> spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] -iex> Blog.Cache.all(spec) +iex> Blog.Cache.get_all(spec) _all_matched # using Ex2ms @@ -388,7 +385,7 @@ iex> spec = ...> fun do ...> {_, key, value, _, _} when value > 10 -> {key, value} ...> end -iex> Blog.Cache.all(spec) +iex> Blog.Cache.get_all(spec) _all_matched ``` @@ -418,9 +415,9 @@ iex> Blog.Cache.count_all(spec) _num_of_matched_entries ``` -> The previous example assumes you are using the built-in local adapter. +> The previous example assumes you are using `Nebulex.Adapters.Local` adapter. 
-Also, if you are using the built-in local adapter, you can use the queries +Also, if you are using `Nebulex.Adapters.Local` adapter, you can use the queries `:expired` and `:unexpired` too, like so: ```elixir @@ -442,9 +439,16 @@ iex> Blog.Cache.delete_all() _num_of_removed_entries ``` +One may also delete a list of keys at once (like a bulk delete): + +```elixir +iex> Blog.Cache.delete_all({:in, ["k1", "k2"]}) +_num_of_removed_entries +``` + And just like `count_all/2`, you can also provide a custom query to delete only -the matched entries, or if you are using the built-in local adapter you can also -use the queries `:expired` and `:unexpired`. For example: +the matched entries, or if you are using `Nebulex.Adapters.Local` adapter, you +can also use the queries `:expired` and `:unexpired`. For example: ```elixir iex> expired_entries = Blog.Cache.delete_all(:expired) @@ -460,7 +464,7 @@ iex> Blog.Cache.delete_all(spec) _num_of_matched_entries ``` -> These examples assumes you are using the built-in local adapter. +> These examples assumes you are using `Nebulex.Adapters.Local` adapter. ### Stream all entries from cache matching the given query @@ -472,8 +476,8 @@ stream is evaluated; based on the `:return` option. ```elixir iex> Blog.Cache.stream() -iex> Blog.Cache.stream(nil, page_size: 100, return: :value) -iex> Blog.Cache.stream(nil, page_size: 100, return: :entry) +iex> Blog.Cache.stream(nil, page_size: 100, return: :keys) +iex> Blog.Cache.stream(nil, page_size: 100, return: :values) # using `Nebulex.Adapters.Local` adapter iex> spec = [{{:"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] @@ -582,8 +586,8 @@ mix nbx.gen.cache -c Blog.NearCache -a Nebulex.Adapters.Multilevel ``` By default, the command generates a 2-level near-cache topology. The first -level or `L1` using the built-in local adapter, and the second one or `L2` -using the built-in partitioned adapter. 
+level or `L1` using `Nebulex.Adapters.Local` adapter, and the second one or `L2` +using `Nebulex.Adapters.Partitioned` adapter. The generated cache module `lib/blog/near_cache.ex`: @@ -672,5 +676,5 @@ To learn more about how multilevel-cache works, please check ## Next - * [Cache Usage Patterns via Nebulex.Caching](http://hexdocs.pm/nebulex/cache-usage-patterns.html) - - Annotations-based DSL to implement different cache usage patterns. + * [Cache Usage Patterns via Nebulex.Caching.Decorators](http://hexdocs.pm/nebulex/cache-usage-patterns.html) + - Annotations-based DSL to implement different cache usage patterns. diff --git a/guides/migrating-to-v2.md b/guides/migrating-to-v2.md index 7d8184aa..83c38d28 100644 --- a/guides/migrating-to-v2.md +++ b/guides/migrating-to-v2.md @@ -34,7 +34,7 @@ There are several changes on the `Nebulex.Cache` API: * Callback `object_info/2` was removed, and callbacks `ttl/1` and `touch/1` were added instead. -## Declarative annotation-based caching via decorators +## Declarative decorator-based caching via decorators * Module `Nebulex.Caching.Decorators` was refactored to `Nebulex.Caching` – Keep in mind that since v1.2.x the caching decorators were included instead diff --git a/guides/telemetry.md b/guides/telemetry.md index 059a81b8..6494d04a 100644 --- a/guides/telemetry.md +++ b/guides/telemetry.md @@ -35,7 +35,16 @@ events: adapter before an adapter callback is executed. * Measurement: `%{system_time: System.monotonic_time()}` - * Metadata: `%{adapter_meta: map, function_name: atom, args: [term]}` + * Metadata: + + ```elixir + %{ + adapter_meta: map, + command: atom, + args: [term], + extra_metadata: map + } + ``` * `[:my_app, :cache, :command, :stop]` - Dispatched by the underlying cache adapter after an adapter callback has been successfully executed. 
@@ -46,8 +55,9 @@ events: ```elixir %{ adapter_meta: map, - function_name: atom, + command: atom, args: [term], + extra_metadata: map, result: term } ``` @@ -62,8 +72,9 @@ events: ```elixir %{ adapter_meta: map, - function_name: atom, + command: atom, args: [term], + extra_metadata: map, kind: :error | :exit | :throw, reason: term, stacktrace: term @@ -99,28 +110,28 @@ or callback name? In this case, one could define a summary metric like so: Telemetry.Metrics.summary( "my_app.cache.command.stop.duration", unit: {:native, :millisecond}, - tags: [:function_name] + tags: [:command] ) ``` As it is described above in the **"Adapter-specific events"** section, the event -includes the invoked callback name into the metadata as `:function_name`, then +includes the invoked callback name into the metadata as `:command`, then we can add it to the metric's tags. ### Extracting tag values from adapter's metadata Let's add another metric for the command event, this time to group by **cache**, -**adapter**, and **function_name** (adapter's callback): +**adapter**, and **command** (adapter's callback): ```elixir Telemetry.Metrics.summary( "my_app.cache.command.stop.duration", unit: {:native, :millisecond}, - tags: [:cache, :adapter, :function_name], + tags: [:cache, :adapter, :command], tag_values: &Map.merge(&1, %{ cache: &1.adapter_meta.cache, - adapter: &1.adapter_meta.cache.__adapter__() + adapter: &1.adapter_meta.adapter }) ) ``` @@ -133,7 +144,7 @@ transformation on the event metadata in order to get to the values we need. Each adapter is responsible for providing stats by implementing `Nebulex.Adapter.Stats` behaviour. However, Nebulex provides a simple default implementation using [Erlang counters][erl_counters], which is used by -the built-in local adapter. The local adapter uses +`Nebulex.Adapters.Local` adapter. 
The local adapter uses `Nebulex.Telemetry.StatsHandler` to aggregate the stats and keep them updated, therefore, it requires the Telemetry events are dispatched by the adapter, otherwise, it won't work properly. @@ -201,8 +212,7 @@ defmodule MyApp.Telemetry do last_value("my_app.cache.stats.misses", tags: [:cache]), last_value("my_app.cache.stats.writes", tags: [:cache]), last_value("my_app.cache.stats.updates", tags: [:cache]), - last_value("my_app.cache.stats.evictions", tags: [:cache]), - last_value("my_app.cache.stats.expirations", tags: [:cache]) + last_value("my_app.cache.stats.evictions", tags: [:cache]) ] end @@ -225,24 +235,24 @@ children = [ ] ``` -Now start an IEx session and call the server: +Now start an IEx session and make some cache calls: ``` -iex(1)> MyApp.Cache.get 1 +iex(1)> MyApp.Cache.get! 1 nil -iex(2)> MyApp.Cache.put 1, 1, ttl: 10 +iex(2)> MyApp.Cache.put! 1, 1, ttl: 10 :ok -iex(3)> MyApp.Cache.get 1 +iex(3)> MyApp.Cache.get! 1 1 -iex(4)> MyApp.Cache.put 2, 2 +iex(4)> MyApp.Cache.put! 2, 2 :ok -iex(5)> MyApp.Cache.delete 2 +iex(5)> MyApp.Cache.delete! 2 :ok iex(6)> Process.sleep(20) :ok -iex(7)> MyApp.Cache.get 1 +iex(7)> MyApp.Cache.get! 1 nil -iex(2)> MyApp.Cache.replace 1, 11 +iex(2)> MyApp.Cache.replace! 1, 11 true ``` @@ -251,7 +261,7 @@ and you should see something like the following output: ``` [Telemetry.Metrics.ConsoleReporter] Got new event! 
Event name: my_app.cache.stats -All measurements: %{evictions: 2, expirations: 1, hits: 1, misses: 2, updates: 1, writes: 2} +All measurements: %{evictions: 2, hits: 1, misses: 2, updates: 1, writes: 2} All metadata: %{cache: MyApp.Cache} Metric measurement: :hits (last_value) @@ -273,10 +283,6 @@ Tag values: %{cache: MyApp.Cache} Metric measurement: :evictions (last_value) With value: 2 Tag values: %{cache: MyApp.Cache} - -Metric measurement: :expirations (last_value) -With value: 1 -Tag values: %{cache: MyApp.Cache} ``` ### Custom metrics @@ -325,8 +331,7 @@ defp metrics do last_value("my_app.cache.stats.misses", tags: [:cache, :node]), last_value("my_app.cache.stats.writes", tags: [:cache, :node]), last_value("my_app.cache.stats.updates", tags: [:cache, :node]), - last_value("my_app.cache.stats.evictions", tags: [:cache, :node]), - last_value("my_app.cache.stats.expirations", tags: [:cache, :node]), + last_value("my_app.cache.stats.evictions", tags: [:cache, :node]) # Nebulex custom Metrics last_value("my_app.cache.size.value", tags: [:cache, :node]) @@ -339,7 +344,7 @@ If you start an IEx session like previously, you should see the new metric too: ``` [Telemetry.Metrics.ConsoleReporter] Got new event! Event name: my_app.cache.stats -All measurements: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0} +All measurements: %{evictions: 0, hits: 0, misses: 0, updates: 0, writes: 0} All metadata: %{cache: MyApp.Cache, node: :nonode@nohost} Metric measurement: :hits (last_value) @@ -362,10 +367,6 @@ Metric measurement: :evictions (last_value) With value: 0 Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} -Metric measurement: :expirations (last_value) -With value: 0 -Tag values: %{cache: MyApp.Cache, node: :nonode@nohost} - [Telemetry.Metrics.ConsoleReporter] Got new event! 
Event name: my_app.cache.size All measurements: %{value: 0} @@ -408,8 +409,8 @@ Then, when you run `MyApp.Multilevel.stats()` you get something like: ```elixir %Nebulex.Stats{ measurements: %{ - l1: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0}, - l2: %{evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0} + l1: %{evictions: 0, hits: 0, misses: 0, updates: 0, writes: 0}, + l2: %{evictions: 0, hits: 0, misses: 0, updates: 0, writes: 0} }, metadata: %{ l1: %{ @@ -459,11 +460,6 @@ metrics in this way: measurement: &get_in(&1, [:l1, :evictions]), tags: [:cache] ), - last_value("my_app.cache.stats.l1.expirations", - event_name: "my_app.cache.stats", - measurement: &get_in(&1, [:l1, :expirations]), - tags: [:cache] - ), # L2 metrics last_value("my_app.cache.stats.l2.hits", diff --git a/lib/mix/tasks/nbx.ex b/lib/mix/tasks/nbx.ex index 23424e50..84c60f9b 100644 --- a/lib/mix/tasks/nbx.ex +++ b/lib/mix/tasks/nbx.ex @@ -24,6 +24,7 @@ defmodule Mix.Tasks.Nbx do defp general do _ = Application.ensure_all_started(:nebulex) + Mix.shell().info("Nebulex v#{Application.spec(:nebulex, :vsn)}") Mix.shell().info("In-Process and Distributed Cache Toolkit for Elixir.") diff --git a/lib/mix/tasks/nbx.gen.cache.ex b/lib/mix/tasks/nbx.gen.cache.ex index 54e31c1f..9c92b8a5 100644 --- a/lib/mix/tasks/nbx.gen.cache.ex +++ b/lib/mix/tasks/nbx.gen.cache.ex @@ -83,6 +83,8 @@ defmodule Mix.Tasks.Nbx.Gen.Cache do {#{inspect(cache)}, []} + And for more information about configuration options, check + adapters documentation and Nebulex.Cache shared options. """) end diff --git a/lib/nebulex.ex b/lib/nebulex.ex index 540e5156..2fc7e5d9 100644 --- a/lib/nebulex.ex +++ b/lib/nebulex.ex @@ -6,7 +6,7 @@ defmodule Nebulex do Via the cache, we can put, get, update, delete and query existing entries. A cache needs an adapter to communicate to the in-memory data store. 
- * `Nebulex.Caching` - Declarative annotation-based caching via + * `Nebulex.Caching` - Declarative decorator-based caching via **`Nebulex.Caching.Decorators`**. Decorators provide an elegant way of annotating functions to be cached or evicted. Caching decorators also enable the usage and/or implementation of cache usage patterns like @@ -61,8 +61,8 @@ defmodule Nebulex do Otherwise, you can start and stop the cache directly at any time by calling `MyApp.Cache.start_link/1` and `MyApp.Cache.stop/1`. - ## Declarative annotation-based caching + ## Declarative decorator-based caching - See [Nebulex.Caching](http://hexdocs.pm/nebulex/Nebulex.Caching.html). + See [Nebulex.Caching.Decorators](http://hexdocs.pm/nebulex/Nebulex.Caching.Decorators.html). """ end diff --git a/lib/nebulex/adapter.ex b/lib/nebulex/adapter.ex index 8be20bc4..9bb724b7 100644 --- a/lib/nebulex/adapter.ex +++ b/lib/nebulex/adapter.ex @@ -3,25 +3,27 @@ defmodule Nebulex.Adapter do Specifies the minimal API required from adapters. """ + alias Nebulex.Cache.Options alias Nebulex.Telemetry @typedoc "Adapter" @type t :: module - @typedoc "Metadata type" - @type metadata :: %{optional(atom) => term} - @typedoc """ The metadata returned by the adapter `c:init/1`. - It must be a map and Nebulex itself will always inject two keys into - the meta: + It must be a map and Nebulex itself will always inject + the following keys into the meta: * `:cache` - The cache module. - * `:pid` - The PID returned by the child spec returned in `c:init/1` + * `:name` - The name of the cache supervisor process. + * `:pid` - The PID returned by the child spec returned in `c:init/1`. + * `:adapter` - The defined cache adapter. """ - @type adapter_meta :: metadata + @type adapter_meta() :: %{optional(term) => term} + + ## Callbacks @doc """ The callback invoked in case the adapter needs to inject code.
@@ -29,9 +31,31 @@ defmodule Nebulex.Adapter do @macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t() @doc """ - Initializes the adapter supervision tree by returning the children. + Initializes the adapter supervision tree by returning the children + and adapter metadata. """ - @callback init(config :: Keyword.t()) :: {:ok, :supervisor.child_spec(), adapter_meta} + @callback init(config :: keyword()) :: {:ok, :supervisor.child_spec(), adapter_meta()} + + # Define optional callbacks + @optional_callbacks __before_compile__: 1 + + ## API + + # Inline common instructions + @compile {:inline, lookup_meta: 1} + + @doc """ + Returns the adapter metadata from its `c:init/1` callback. + + It expects a process name of the cache. The name is either + an atom or a PID. For a given cache, you often want to call + this function based on the dynamic cache: + + Nebulex.Adapter.lookup_meta(cache.get_dynamic_cache()) + + """ + @spec lookup_meta(atom() | pid()) :: {:ok, adapter_meta()} | {:error, Nebulex.Error.t()} + defdelegate lookup_meta(name_or_pid), to: Nebulex.Cache.Registry, as: :lookup @doc """ Executes the function `fun` passing as parameters the adapter and metadata @@ -39,81 +63,116 @@ defmodule Nebulex.Adapter do It expects a name or a PID representing the cache. """ - @spec with_meta(atom | pid, (module, adapter_meta -> term)) :: term + @spec with_meta(atom() | pid(), (adapter_meta() -> any())) :: any() | {:error, Nebulex.Error.t()} def with_meta(name_or_pid, fun) do - {adapter, adapter_meta} = Nebulex.Cache.Registry.lookup(name_or_pid) - fun.(adapter, adapter_meta) + with {:ok, adapter_meta} <- lookup_meta(name_or_pid) do + fun.(adapter_meta) + end end - # FIXME: ExCoveralls does not mark most of this section as covered - # coveralls-ignore-start + ## Helpers @doc """ - Helper macro for the adapters so they can add the logic for emitting the - recommended Telemetry events. + Builds up a public wrapper function for invoking an adapter command. 
- See the built-in adapters for more information on how to use this macro. + **NOTE:** Internal purposes only. """ - defmacro defspan(fun, opts \\ [], do: block) do - {name, [adapter_meta | args_tl], as, [_ | as_args_tl] = as_args} = build_defspan(fun, opts) - - quote do - def unquote(name)(unquote_splicing(as_args)) + defmacro defcommand(fun, opts \\ []) do + build_defcommand(:public, fun, opts) + end - def unquote(name)(%{telemetry: false} = unquote(adapter_meta), unquote_splicing(args_tl)) do - unquote(block) - end + @doc """ + Builds up a private wrapper function for invoking an adapter command. - def unquote(name)(unquote_splicing(as_args)) do - metadata = %{ - adapter_meta: unquote(adapter_meta), - function_name: unquote(as), - args: unquote(as_args_tl) - } - - Telemetry.span( - unquote(adapter_meta).telemetry_prefix ++ [:command], - metadata, - fn -> - result = - unquote(name)( - Map.merge(unquote(adapter_meta), %{telemetry: false, in_span?: true}), - unquote_splicing(as_args_tl) - ) - - {result, Map.put(metadata, :result, result)} - end - ) - end - end + **NOTE:** Internal purposes only. 
+ """ + defmacro defcommandp(fun, opts \\ []) do + build_defcommand(:private, fun, opts) end - ## Private Functions + defp build_defcommand(public_or_private, fun, opts) do + # Decompose the function call + {function_name, [name_or_pid | args_tl] = args} = Macro.decompose_call(fun) - defp build_defspan(fun, opts) when is_list(opts) do - {name, args} = - case Macro.decompose_call(fun) do - {_, _} = pair -> pair - _ -> raise ArgumentError, "invalid syntax in defspan #{Macro.to_string(fun)}" - end + # Get options + command = Keyword.get(opts, :command, function_name) + l_args = Keyword.get(opts, :largs, []) + r_args = Keyword.get(opts, :rargs, []) - as = Keyword.get(opts, :as, name) - as_args = build_as_args(args) + # Build command args + command_args = l_args ++ args_tl ++ r_args - {name, args, as, as_args} + # Build the function + case public_or_private do + :public -> + quote do + def unquote(function_name)(unquote_splicing(args)) do + unquote(command_call(name_or_pid, command, command_args)) + end + end + + :private -> + quote do + defp unquote(function_name)(unquote_splicing(args)) do + unquote(command_call(name_or_pid, command, command_args)) + end + end + end end - defp build_as_args(args) do - for {arg, idx} <- Enum.with_index(args) do - arg - |> Macro.to_string() - |> build_as_arg({arg, idx}) + defp command_call(name_or_pid, command, args) do + quote do + with {:ok, adapter_meta} <- unquote(__MODULE__).lookup_meta(unquote(name_or_pid)) do + unquote(__MODULE__).run_command(adapter_meta, unquote(command), unquote(args)) + end end end - # sobelow_skip ["DOS.BinToAtom"] - defp build_as_arg("_" <> _, {{_e1, e2, e3}, idx}), do: {:"var#{idx}", e2, e3} - defp build_as_arg(_, {arg, _idx}), do: arg + @doc """ + Convenience function for invoking the adapter running a command. + + **NOTE:** Internal purposes only. 
+ """ + @spec run_command(adapter_meta(), atom(), [any()]) :: any() + def run_command(adapter_meta, command, args) + + def run_command( + %{ + telemetry: true, + telemetry_prefix: telemetry_prefix, + adapter: adapter + } = adapter_meta, + command, + args + ) do + opts = + args + # TODO: Replace with `List.last/2` when required Elixir version is >= 1.12 + |> List.last() + |> Kernel.||([]) + |> Keyword.take([:telemetry_event, :telemetry_metadata]) + |> Options.validate_runtime_shared_opts!() + + metadata = %{ + adapter_meta: adapter_meta, + command: command, + args: args, + extra_metadata: Keyword.fetch!(opts, :telemetry_metadata) + } + + opts + |> Keyword.get(:telemetry_event, telemetry_prefix ++ [:command]) + |> Telemetry.span( + metadata, + fn -> + result = apply(adapter, command, [adapter_meta | args]) + + {result, Map.put(metadata, :result, result)} + end + ) + end - # coveralls-ignore-stop + def run_command(%{adapter: adapter} = adapter_meta, command, args) do + apply(adapter, command, [adapter_meta | args]) + end end diff --git a/lib/nebulex/adapter/entry.ex b/lib/nebulex/adapter/entry.ex deleted file mode 100644 index ccf6efff..00000000 --- a/lib/nebulex/adapter/entry.ex +++ /dev/null @@ -1,165 +0,0 @@ -defmodule Nebulex.Adapter.Entry do - @moduledoc """ - Specifies the entry API required from adapters. - - This behaviour specifies all read/write key-based functions, - the ones applied to a specific cache entry. 
- """ - - @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.adapter_meta() - - @typedoc "Proxy type to the cache key" - @type key :: Nebulex.Cache.key() - - @typedoc "Proxy type to the cache value" - @type value :: Nebulex.Cache.value() - - @typedoc "Proxy type to the cache options" - @type opts :: Nebulex.Cache.opts() - - @typedoc "Proxy type to the cache entries" - @type entries :: Nebulex.Cache.entries() - - @typedoc "TTL for a cache entry" - @type ttl :: timeout - - @typedoc "Write command" - @type on_write :: :put | :put_new | :replace - - @doc """ - Gets the value for a specific `key` in `cache`. - - See `c:Nebulex.Cache.get/2`. - """ - @callback get(adapter_meta, key, opts) :: value - - @doc """ - Gets a collection of entries from the Cache, returning them as `Map.t()` of - the values associated with the set of keys requested. - - For every key that does not hold a value or does not exist, that key is - simply ignored. Because of this, the operation never fails. - - See `c:Nebulex.Cache.get_all/2`. - """ - @callback get_all(adapter_meta, [key], opts) :: map - - @doc """ - Puts the given `value` under `key` into the `cache`. - - Returns `true` if the `value` with key `key` is successfully inserted; - otherwise `false` is returned. - - The `ttl` argument sets the time-to-live for the stored entry. If it is not - set, it means the entry hasn't a time-to-live, then it shouldn't expire. - - ## OnWrite - - The `on_write` argument supports the following values: - - * `:put` - If the `key` already exists, it is overwritten. Any previous - time-to-live associated with the key is discarded on successful `write` - operation. - - * `:put_new` - It only stores the entry if the `key` does not already exist, - otherwise, `false` is returned. - - * `:replace` - Alters the value stored under the given `key`, but only - if the key already exists into the cache, otherwise, `false` is - returned. 
- - See `c:Nebulex.Cache.put/3`, `c:Nebulex.Cache.put_new/3`, - `c:Nebulex.Cache.replace/3`. - """ - @callback put(adapter_meta, key, value, ttl, on_write, opts) :: boolean - - @doc """ - Puts the given `entries` (key/value pairs) into the `cache`. - - Returns `true` if all the keys were inserted. If no key was inserted - (at least one key already existed), `false` is returned. - - The `ttl` argument sets the time-to-live for the stored entry. If it is not - set, it means the entry hasn't a time-to-live, then it shouldn't expire. - The given `ttl` is applied to all keys. - - ## OnWrite - - The `on_write` argument supports the following values: - - * `:put` - If the `key` already exists, it is overwritten. Any previous - time-to-live associated with the key is discarded on successful `write` - operation. - - * `:put_new` - It only stores the entry if the `key` does not already exist, - otherwise, `false` is returned. - - Ideally, this operation should be atomic, so all given keys are set at once. - But it depends purely on the adapter's implementation and the backend used - internally by the adapter. Hence, it is recommended to checkout the - adapter's documentation. - - See `c:Nebulex.Cache.put_all/2`. - """ - @callback put_all(adapter_meta, entries, ttl, on_write, opts) :: boolean - - @doc """ - Deletes a single entry from cache. - - See `c:Nebulex.Cache.delete/2`. - """ - @callback delete(adapter_meta, key, opts) :: :ok - - @doc """ - Returns and removes the entry with key `key` in the cache. - - See `c:Nebulex.Cache.take/2`. - """ - @callback take(adapter_meta, key, opts) :: value - - @doc """ - Updates the counter mapped to the given `key`. - - If `amount` > 0, the counter is incremented by the given `amount`. - If `amount` < 0, the counter is decremented by the given `amount`. - If `amount` == 0, the counter is not updated. - - See `c:Nebulex.Cache.incr/3`. - See `c:Nebulex.Cache.decr/3`. 
- """ - @callback update_counter(adapter_meta, key, amount, ttl, default, opts) :: - integer - when amount: integer, default: integer - - @doc """ - Returns whether the given `key` exists in cache. - - See `c:Nebulex.Cache.has_key?/1`. - """ - @callback has_key?(adapter_meta, key) :: boolean - - @doc """ - Returns the TTL (time-to-live) for the given `key`. If the `key` does not - exist, then `nil` is returned. - - See `c:Nebulex.Cache.ttl/1`. - """ - @callback ttl(adapter_meta, key) :: ttl | nil - - @doc """ - Returns `true` if the given `key` exists and the new `ttl` was successfully - updated, otherwise, `false` is returned. - - See `c:Nebulex.Cache.expire/2`. - """ - @callback expire(adapter_meta, key, ttl) :: boolean - - @doc """ - Returns `true` if the given `key` exists and the last access time was - successfully updated, otherwise, `false` is returned. - - See `c:Nebulex.Cache.touch/1`. - """ - @callback touch(adapter_meta, key) :: boolean -end diff --git a/lib/nebulex/adapter/info.ex b/lib/nebulex/adapter/info.ex new file mode 100644 index 00000000..e001c49b --- /dev/null +++ b/lib/nebulex/adapter/info.ex @@ -0,0 +1,36 @@ +defmodule Nebulex.Adapter.Info do + @moduledoc """ + Specifies the adapter Info API. + """ + + @doc """ + Returns `{:ok, info}` where `info` contains the requested cache information, + as specified by the `spec`. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + The `spec` (information specification key) can be: + + * **The atom `:all`**: returns a map with all information items. + * **An atom**: returns the value for the requested information item. + * **A list of atoms**: returns a map only with the requested information + items. + + The adapters are free to add the information specification keys they want, + however, Nebulex suggests the adapters add the following specs: + + * `:server` - General information about the cache server. 
E.g.: cache name, + adapter, PID, etc. + * `:memory` - Memory consumption related information. E.g.: used memory, + allocated memory, etc. + * `:stats` - Cache statistics. E.g.: hits, misses, etc. + + See `c:Nebulex.Cache.info/2`. + """ + @callback info( + Nebulex.Adapter.adapter_meta(), + Nebulex.Cache.info_spec(), + Nebulex.Cache.opts() + ) :: Nebulex.Cache.ok_error_tuple(Nebulex.Cache.info_data()) +end diff --git a/lib/nebulex/adapter/keyslot.ex b/lib/nebulex/adapter/keyslot.ex deleted file mode 100644 index 58d94930..00000000 --- a/lib/nebulex/adapter/keyslot.ex +++ /dev/null @@ -1,51 +0,0 @@ -defmodule Nebulex.Adapter.Keyslot do - @moduledoc """ - This behaviour provides a callback to compute the hash slot for a specific - key based on the number of slots (partitions, nodes, ...). - - The purpose of this module is to allow users to implement a custom - hash-slot function to distribute the keys. It can be used to select - the node/slot where a specific key is supposed to be. - - > It is highly recommended to use a **Consistent Hashing** algorithm. - - ## Example - - defmodule MyApp.Keyslot do - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - This example uses [Jumping Consistent Hash](https://github.com/cabol/jchash). - """ - - @doc """ - Returns an integer within the range `0..range-1` identifying the hash slot - the specified `key` hashes to. 
- - ## Example - - iex> MyKeyslot.hash_slot("mykey", 10) - 2 - - """ - @callback hash_slot(key :: any, range :: pos_integer) :: non_neg_integer - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - defdelegate hash_slot(key, range), to: :erlang, as: :phash2 - - defoverridable hash_slot: 2 - end - end -end diff --git a/lib/nebulex/adapter/kv.ex b/lib/nebulex/adapter/kv.ex new file mode 100644 index 00000000..2c157958 --- /dev/null +++ b/lib/nebulex/adapter/kv.ex @@ -0,0 +1,206 @@ +defmodule Nebulex.Adapter.KV do + @moduledoc """ + Specifies the adapter Key/Value API. + + This behaviour specifies all read/write key-based functions, + the ones applied to a specific cache key. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() + + @typedoc "Proxy type to the cache key" + @type key() :: Nebulex.Cache.key() + + @typedoc "Proxy type to the cache value" + @type value() :: Nebulex.Cache.value() + + @typedoc "Proxy type to the cache options" + @type opts() :: Nebulex.Cache.opts() + + @typedoc "Proxy type to the cache entries" + @type entries() :: Nebulex.Cache.entries() + + @typedoc "TTL for a cache entry" + @type ttl() :: timeout() + + @typedoc "Write command type" + @type on_write() :: :put | :put_new | :replace + + @doc """ + Fetches the value for a specific `key` in the cache. + + If the cache contains the given `key`, then its value is returned + in the shape of `{:ok, value}`. + + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. + + See `c:Nebulex.Cache.fetch/2`. + """ + @callback fetch(adapter_meta(), key(), opts()) :: + Nebulex.Cache.ok_error_tuple(value, Nebulex.Cache.fetch_error_reason()) + + @doc """ + Puts the given `value` under `key` into the `cache`. 
+ + The `ttl` argument sets the time-to-live for the stored entry. If it is not + set, it means the entry hasn't a time-to-live, then it shouldn't expire. + + Returns `{:ok, true}` if the `value` with key `key` is successfully inserted, + otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## OnWrite + + The `on_write` argument supports the following values: + + * `:put` - If the `key` already exists, it is overwritten. Any previous + time-to-live associated with the key is discarded on successful `write` + operation. + + * `:put_new` - It only stores the entry if the `key` does not already exist, + otherwise, `{:ok, false}` is returned. + + * `:replace` - Alters the value stored under the given `key`, but only + if the key already exists into the cache, otherwise, `{:ok, false}` is + returned. + + See `c:Nebulex.Cache.put/3`, `c:Nebulex.Cache.put_new/3`, + `c:Nebulex.Cache.replace/3`. + """ + @callback put(adapter_meta(), key(), value(), ttl(), on_write(), opts()) :: + Nebulex.Cache.ok_error_tuple(boolean()) + + @doc """ + Puts the given `entries` (key/value pairs) into the `cache`. + + The `ttl` argument sets the time-to-live for the stored entry. If it is not + set, it means the entry hasn't a time-to-live, then it shouldn't expire. + The given `ttl` is applied to all keys. + + Returns `{:ok, true}` if all the keys were inserted. If no key was inserted + (at least one key already existed), `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## OnWrite + + The `on_write` argument supports the following values: + + * `:put` - If the `key` already exists, it is overwritten. Any previous + time-to-live associated with the key is discarded on successful `write` + operation.
+
+    * `:put_new` - It only stores the entry if the `key` does not already exist,
+      otherwise, `{:ok, false}` is returned.
+
+  Ideally, this operation should be atomic, so all given keys are set at once.
+  But it depends purely on the adapter's implementation and the backend used
+  internally by the adapter. Hence, it is recommended to check out the
+  adapter's documentation.
+
+  See `c:Nebulex.Cache.put_all/2`.
+  """
+  @callback put_all(adapter_meta(), entries(), ttl(), on_write(), opts()) ::
+              Nebulex.Cache.ok_error_tuple(boolean())
+
+  @doc """
+  Deletes a single entry from cache.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  See `c:Nebulex.Cache.delete/2`.
+  """
+  @callback delete(adapter_meta(), key(), opts()) :: :ok | Nebulex.Cache.error_tuple()
+
+  @doc """
+  Removes and returns the value associated with `key` in the cache.
+
+  If `key` is present in the cache, its value is removed and then returned
+  in the shape of `{:ok, value}`.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned. `reason` is the cause of the error and can be
+  `Nebulex.KeyError` if the cache does not contain `key`,
+  `Nebulex.Error` otherwise.
+
+  See `c:Nebulex.Cache.take/2`.
+  """
+  @callback take(adapter_meta(), key(), opts()) ::
+              Nebulex.Cache.ok_error_tuple(value(), Nebulex.Cache.fetch_error_reason())
+
+  @doc """
+  Updates the counter mapped to the given `key`.
+
+  If `amount` > 0, the counter is incremented by the given `amount`.
+  If `amount` < 0, the counter is decremented by the given `amount`.
+  If `amount` == 0, the counter is not updated.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned, where `reason` is the cause of the error.
+
+  See `c:Nebulex.Cache.incr/3`.
+  See `c:Nebulex.Cache.decr/3`.
+ """ + @callback update_counter(adapter_meta(), key(), amount, ttl(), default, opts()) :: + Nebulex.Cache.ok_error_tuple(integer()) + when amount: integer(), default: integer() + + @doc """ + Determines if the cache contains an entry for the specified `key`. + + More formally, returns `{:ok, true}` if the cache contains the given `key`. + If the cache doesn't contain `key`, `{:ok, :false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.has_key?/2`. + """ + @callback has_key?(adapter_meta(), key(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean()) + + @doc """ + Returns the remaining time-to-live for the given `key`. + + If `key` is present in the cache, then its remaining TTL is returned + in the shape of `{:ok, ttl}`. + + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. + + See `c:Nebulex.Cache.ttl/2`. + """ + @callback ttl(adapter_meta(), key(), opts()) :: + Nebulex.Cache.ok_error_tuple(value(), Nebulex.Cache.fetch_error_reason()) + + @doc """ + Returns `{:ok, true}` if the given `key` exists and the new `ttl` was + successfully updated, otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.expire/3`. + """ + @callback expire(adapter_meta(), key(), ttl(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean()) + + @doc """ + Returns `{:ok, true}` if the given `key` exists and the last access time was + successfully updated, otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.touch/2`. 
+ """ + @callback touch(adapter_meta(), key(), opts()) :: Nebulex.Cache.ok_error_tuple(boolean()) +end diff --git a/lib/nebulex/adapter/persistence.ex b/lib/nebulex/adapter/persistence.ex index c2f9f5fa..bfa906f1 100644 --- a/lib/nebulex/adapter/persistence.ex +++ b/lib/nebulex/adapter/persistence.ex @@ -1,106 +1,25 @@ defmodule Nebulex.Adapter.Persistence do @moduledoc """ - Specifies the adapter persistence API. - - ## Default implementation - - This module provides a default implementation that uses `File` and `Stream` - under-the-hood. For dumping a cache to a file, the entries are streamed from - the cache and written in chunks (one chunk per line), and each chunk contains - N number of entries. For loading the entries from a file, the file is read - and streamed line-by-line, so that the entries collected on each line are - inserted in streaming fashion as well. - - The default implementation accepts the following options only for `dump` - operation (there are not options for `load`): - - * `entries_per_line` - The number of entries to be written per line in the - file. Defaults to `10`. - - * `compression` - The compression level. The values are the same as - `:erlang.term_to_binary /2`. Defaults to `6`. - - See `c:Nebulex.Cache.dump/2` and `c:Nebulex.Cache.load/2` for more - information. + Specifies the adapter Persistence API. """ @doc """ Dumps a cache to the given file `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. See `c:Nebulex.Cache.dump/2`. """ @callback dump(Nebulex.Adapter.adapter_meta(), Path.t(), Nebulex.Cache.opts()) :: - :ok | {:error, term} + :ok | Nebulex.Cache.error_tuple() @doc """ Loads a dumped cache from the given `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. See `c:Nebulex.Cache.load/2`. 
""" @callback load(Nebulex.Adapter.adapter_meta(), Path.t(), Nebulex.Cache.opts()) :: - :ok | {:error, term} - - alias Nebulex.Entry - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Persistence - - # sobelow_skip ["Traversal.FileModule"] - @impl true - def dump(%{cache: cache}, path, opts) do - path - |> File.open([:read, :write], fn io_dev -> - nil - |> cache.stream(return: :entry) - |> Stream.filter(&(not Entry.expired?(&1))) - |> Stream.map(&{&1.key, &1.value}) - |> Stream.chunk_every(Keyword.get(opts, :entries_per_line, 10)) - |> Enum.each(fn entries -> - bin = Entry.encode(entries, get_compression(opts)) - :ok = IO.puts(io_dev, bin) - end) - end) - |> handle_response() - end - - # sobelow_skip ["Traversal.FileModule"] - @impl true - def load(%{cache: cache}, path, opts) do - path - |> File.open([:read], fn io_dev -> - io_dev - |> IO.stream(:line) - |> Stream.map(&String.trim/1) - |> Enum.each(fn line -> - entries = Entry.decode(line, [:safe]) - cache.put_all(entries, opts) - end) - end) - |> handle_response() - end - - defoverridable dump: 3, load: 3 - - ## Helpers - - defp handle_response({:ok, _}), do: :ok - defp handle_response({:error, _} = error), do: error - - defp get_compression(opts) do - case Keyword.get(opts, :compression) do - value when is_integer(value) and value >= 0 and value < 10 -> - [compressed: value] - - _ -> - [:compressed] - end - end - end - end + :ok | Nebulex.Cache.error_tuple() end diff --git a/lib/nebulex/adapter/queryable.ex b/lib/nebulex/adapter/queryable.ex index d4a7c530..1428e758 100644 --- a/lib/nebulex/adapter/queryable.ex +++ b/lib/nebulex/adapter/queryable.ex @@ -1,45 +1,58 @@ defmodule Nebulex.Adapter.Queryable do @moduledoc """ - Specifies the query API required from adapters. + Specifies the adapter Query API. ## Query values - There are two types of query values. The ones shared and implemented - by all adapters and the ones that are adapter specific. 
+  There are two types of query values. The ones recommended by Nebulex for all
+  adapters to implement (shared queries) and the adapter-specific ones.
 
-  ### Common queries
+  ### Shared queries
 
-  The following query values are shared and/or supported for all adapters:
+  The following query values are the ones recommended by Nebulex
+  for all adapters to implement:
 
-  * `nil` - Matches all cached entries.
+    * `nil` - Query all the entries in the cache.
+
+    * `{:in, keys}` - Query the entries associated with the set of `keys`
+      requested.
 
   ### Adapter-specific queries
 
   The `query` value depends entirely on the adapter implementation; it could
   any term. Therefore, it is highly recommended to see adapters' documentation
-  for more information about building queries. For example, the built-in
+  for more information about building queries. For example,
   `Nebulex.Adapters.Local` adapter uses `:ets.match_spec()` for queries, as
   well as other pre-defined ones like `:unexpired` and `:expired`.
   """
 
   @typedoc "Proxy type to the adapter meta"
-  @type adapter_meta :: Nebulex.Adapter.adapter_meta()
+  @type adapter_meta() :: Nebulex.Adapter.adapter_meta()
+
+  @typedoc "Proxy type to the query"
+  @type query() :: Nebulex.Cache.query()
 
   @typedoc "Proxy type to the cache options"
-  @type opts :: Nebulex.Cache.opts()
+  @type opts() :: Nebulex.Cache.opts()
+
+  @typedoc "Query operation type"
+  @type operation() :: :get_all | :count_all | :delete_all
 
   @doc """
   Executes the `query` according to the given `operation`.
 
-  Raises `Nebulex.QueryError` if query is invalid.
+  This callback returns:
+
+    * `{:ok, result}` - The query was successfully executed. The `result`
+      could be a list with the matched entries or its count.
 
-  In the the adapter does not support the given `operation`, an `ArgumentError`
-  exception should be raised.
+    * `{:error, reason}` - An error occurred executing the command.
+      `reason` is the cause of the error.
## Operations - * `:all` - Returns a list with all entries from cache matching the given - `query`. + * `:get_all` - Returns a list with all entries from cache matching the + given `query`. * `:count_all` - Returns the number of matched entries with the given `query`. * `:delete_all` - Deletes all entries matching the given `query`. @@ -48,19 +61,21 @@ defmodule Nebulex.Adapter.Queryable do It is used on `c:Nebulex.Cache.all/2`, `c:Nebulex.Cache.count_all/2`, and `c:Nebulex.Cache.delete_all/2`. """ - @callback execute( - adapter_meta, - operation :: :all | :count_all | :delete_all, - query :: term, - opts - ) :: [term] | integer + @callback execute(adapter_meta(), operation(), query(), opts()) :: + Nebulex.Cache.ok_error_tuple([any()] | non_neg_integer()) @doc """ Streams the given `query`. - Raises `Nebulex.QueryError` if query is invalid. + This callback returns: + + * `{:ok, stream}` - The query is valid, then the stream is returned. + + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. See `c:Nebulex.Cache.stream/2`. """ - @callback stream(adapter_meta, query :: term, opts) :: Enumerable.t() + @callback stream(adapter_meta(), query(), opts()) :: + Nebulex.Cache.ok_error_tuple(Enumerable.t()) end diff --git a/lib/nebulex/adapter/stats.ex b/lib/nebulex/adapter/stats.ex deleted file mode 100644 index 0cc983c8..00000000 --- a/lib/nebulex/adapter/stats.ex +++ /dev/null @@ -1,109 +0,0 @@ -defmodule Nebulex.Adapter.Stats do - @moduledoc """ - Specifies the stats API required from adapters. - - Each adapter is responsible for providing support for stats by implementing - this behaviour. However, this module brings with a default implementation - using [Erlang counters][https://erlang.org/doc/man/counters.html], with all - callbacks overridable, which is supported by the built-in adapters. 
- - See `Nebulex.Adapters.Local` for more information about how this can be used - from the adapter, and also [Nebulex Telemetry Guide][telemetry_guide] to learn - how to use the Cache with Telemetry. - - [telemetry_guide]: http://hexdocs.pm/nebulex/telemetry.html - """ - - @doc """ - Returns `Nebulex.Stats.t()` with the current stats values. - - If the stats are disabled for the cache, then `nil` is returned. - - The adapter may also include additional custom measurements, - as well as metadata. - - See `c:Nebulex.Cache.stats/0`. - """ - @callback stats(Nebulex.Adapter.adapter_meta()) :: Nebulex.Stats.t() | nil - - @doc false - defmacro __using__(_opts) do - quote do - @behaviour Nebulex.Adapter.Stats - - @impl true - def stats(adapter_meta) do - if counter_ref = adapter_meta[:stats_counter] do - %Nebulex.Stats{ - measurements: %{ - hits: :counters.get(counter_ref, 1), - misses: :counters.get(counter_ref, 2), - writes: :counters.get(counter_ref, 3), - updates: :counters.get(counter_ref, 4), - evictions: :counters.get(counter_ref, 5), - expirations: :counters.get(counter_ref, 6) - }, - metadata: %{ - cache: adapter_meta[:name] || adapter_meta[:cache] - } - } - end - end - - defoverridable stats: 1 - end - end - - import Nebulex.Helpers - - @doc """ - Initializes the Erlang's counter to be used by the adapter. See the module - documentation for more information about the stats default implementation. - - Returns `nil` is the option `:stats` is set to `false` or it is not set at - all; the stats will be skipped. - - ## Example - - Nebulex.Adapter.Stats.init(opts) - - > **NOTE:** This function is usually called by the adapter in case it uses - the default implementation; the adapter should feed `Nebulex.Stats.t()` - counters. - - See adapters documentation for more information about stats implementation. 
- """ - @spec init(Keyword.t()) :: :counters.counters_ref() | nil - def init(opts) do - case get_boolean_option(opts, :stats, false) do - true -> :counters.new(6, [:write_concurrency]) - false -> nil - end - end - - @doc """ - Increments the `counter`'s `stat_name` by the given `incr` value. - - ## Examples - - Nebulex.Adapter.Stats.incr(stats_counter, :hits) - - Nebulex.Adapter.Stats.incr(stats_counter, :writes, 10) - - > **NOTE:** This function is usually called by the adapter in case it uses - the default implementation; the adapter should feed `Nebulex.Stats.t()` - counters. - - See adapters documentation for more information about stats implementation. - """ - @spec incr(:counters.counters_ref() | nil, atom, integer) :: :ok - def incr(counter, stat_name, incr \\ 1) - - def incr(nil, _stat, _incr), do: :ok - def incr(ref, :hits, incr), do: :counters.add(ref, 1, incr) - def incr(ref, :misses, incr), do: :counters.add(ref, 2, incr) - def incr(ref, :writes, incr), do: :counters.add(ref, 3, incr) - def incr(ref, :updates, incr), do: :counters.add(ref, 4, incr) - def incr(ref, :evictions, incr), do: :counters.add(ref, 5, incr) - def incr(ref, :expirations, incr), do: :counters.add(ref, 6, incr) -end diff --git a/lib/nebulex/adapter/transaction.ex b/lib/nebulex/adapter/transaction.ex index f17fd9e6..b42900bf 100644 --- a/lib/nebulex/adapter/transaction.ex +++ b/lib/nebulex/adapter/transaction.ex @@ -1,6 +1,6 @@ defmodule Nebulex.Adapter.Transaction do @moduledoc """ - Specifies the adapter transactions API. + Specifies the adapter Transaction API. 
## Default implementation @@ -35,45 +35,64 @@ defmodule Nebulex.Adapter.Transaction do Locking only the involved key (recommended): - MyCache.transaction [keys: [:counter]], fn -> - counter = MyCache.get(:counter) - MyCache.set(:counter, counter + 1) - end - - MyCache.transaction [keys: [:alice, :bob]], fn -> - alice = MyCache.get(:alice) - bob = MyCache.get(:bob) - MyCache.set(:alice, %{alice | balance: alice.balance + 100}) - MyCache.set(:bob, %{bob | balance: bob.balance + 100}) - end + MyCache.transaction( + fn -> + counter = MyCache.get(:counter) + MyCache.set(:counter, counter + 1) + end, + [keys: [:counter]] + ) + + MyCache.transaction( + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.set(:alice, %{alice | balance: alice.balance + 100}) + MyCache.set(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) """ @doc """ Runs the given function inside a transaction. - A successful transaction returns the value returned by the function. + If an Elixir exception occurs, the exception will bubble up from the + transaction function. If the transaction is aborted, + `{:error, reason}` is returned. + + A successful transaction returns the value returned by the function wrapped + in a tuple as `{:ok, any()}`. See `c:Nebulex.Cache.transaction/2`. """ - @callback transaction(Nebulex.Adapter.adapter_meta(), Nebulex.Cache.opts(), fun) :: any + @callback transaction(Nebulex.Adapter.adapter_meta(), fun(), Nebulex.Cache.opts()) :: + Nebulex.Cache.ok_error_tuple(any()) @doc """ - Returns `true` if the given process is inside a transaction. + Returns `{:ok, true}` if the current process is inside a transaction, + otherwise, `{:ok, false}` is returned. - See `c:Nebulex.Cache.in_transaction?/0`. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + See `c:Nebulex.Cache.in_transaction?/1`. 
""" - @callback in_transaction?(Nebulex.Adapter.adapter_meta()) :: boolean + @callback in_transaction?(Nebulex.Adapter.adapter_meta(), Nebulex.Cache.opts()) :: + Nebulex.Cache.ok_error_tuple(boolean()) @doc false defmacro __using__(_opts) do quote do @behaviour Nebulex.Adapter.Transaction + import Nebulex.Utils, only: [wrap_ok: 1, wrap_error: 2] + @impl true - def transaction(%{cache: cache, pid: pid} = adapter_meta, opts, fun) do + def transaction(%{cache: cache, pid: pid} = adapter_meta, fun, opts) do adapter_meta - |> in_transaction?() + |> do_in_transaction?() |> do_transaction( pid, adapter_meta[:name] || cache, @@ -85,16 +104,20 @@ defmodule Nebulex.Adapter.Transaction do end @impl true - def in_transaction?(%{pid: pid}) do - !!Process.get({pid, self()}) + def in_transaction?(adapter_meta, _opts) do + wrap_ok do_in_transaction?(adapter_meta) end - defoverridable transaction: 3, in_transaction?: 1 + defoverridable transaction: 3, in_transaction?: 2 ## Helpers + defp do_in_transaction?(%{pid: pid}) do + !!Process.get({pid, self()}) + end + defp do_transaction(true, _pid, _name, _keys, _nodes, _retries, fun) do - fun.() + {:ok, fun.()} end defp do_transaction(false, pid, name, keys, nodes, retries, fun) do @@ -105,7 +128,7 @@ defmodule Nebulex.Adapter.Transaction do try do _ = Process.put({pid, self()}, %{keys: keys, nodes: nodes}) - fun.() + {:ok, fun.()} after _ = Process.delete({pid, self()}) @@ -113,7 +136,7 @@ defmodule Nebulex.Adapter.Transaction do end false -> - raise "transaction aborted" + wrap_error Nebulex.Error, reason: :transaction_aborted, nodes: nodes, cache: name end end diff --git a/lib/nebulex/adapters/common/info.ex b/lib/nebulex/adapters/common/info.ex new file mode 100644 index 00000000..7f2216e6 --- /dev/null +++ b/lib/nebulex/adapters/common/info.ex @@ -0,0 +1,116 @@ +defmodule Nebulex.Adapters.Common.Info do + @moduledoc """ + A simple/default implementation for `Nebulex.Adapter.Info` behaviour. 
+ + The implementation defines the following information specifications: + + * `:server` - A map with general information about the cache server. + Includes the following keys: + + * `:nbx_version` - The Nebulex version. + * `:cache_module` - The defined cache module. + * `:cache_adapter` - The cache adapter. + * `:cache_name` - The cache name. + * `:cache_pid` - The cache PID. + + * `:stats` - A map with the cache statistics keys, as specified by + `Nebulex.Adapters.Common.Info.Stats`. + + The info data will look like this: + + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + }, + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } + } + + """ + + @doc false + defmacro __using__(_opts) do + quote do + @behaviour Nebulex.Adapter.Info + + alias Nebulex.Adapters.Common.Info + + @impl true + def info(adapter_meta, path, _opts) do + {:ok, Info.info(adapter_meta, path)} + end + + defoverridable info: 3 + end + end + + alias Nebulex.Adapters.Common.Info.Stats + + @doc false + def info(adapter_meta, spec) + + def info(adapter_meta, spec) when is_list(spec) do + for i <- spec, into: %{} do + {i, do_info!(adapter_meta, i)} + end + end + + def info(adapter_meta, spec) do + do_info!(adapter_meta, spec) + end + + defp do_info!(adapter_meta, :all) do + %{ + server: server_info(adapter_meta), + stats: stats(adapter_meta) + } + end + + defp do_info!(adapter_meta, :server) do + server_info(adapter_meta) + end + + defp do_info!(adapter_meta, :stats) do + stats(adapter_meta) + end + + defp do_info!(_adapter_meta, spec) do + raise ArgumentError, "invalid information specification key #{inspect(spec)}" + end + + ## Helpers + + defp server_info(adapter_meta) do + %{ + nbx_version: nbx_vsn(), + cache_module: adapter_meta[:cache], + cache_adapter: adapter_meta[:adapter], + cache_name: adapter_meta[:name], + 
cache_pid: adapter_meta[:pid] + } + end + + defp stats(%{stats_counter: {:write_concurrency, _} = ref}) do + Stats.count(ref) + end + + defp stats(_adapter_meta) do + Stats.new() + end + + defp nbx_vsn do + Application.spec(:nebulex, :vsn) + |> to_string() + end +end diff --git a/lib/nebulex/adapters/common/info/stats.ex b/lib/nebulex/adapters/common/info/stats.ex new file mode 100644 index 00000000..c24c051c --- /dev/null +++ b/lib/nebulex/adapters/common/info/stats.ex @@ -0,0 +1,181 @@ +defmodule Nebulex.Adapters.Common.Info.Stats do + @moduledoc """ + Stats implementation using [Erlang counters][erl_counters]. + + Adapters are directly responsible for implementing the `Nebulex.Adapter.Info` + behaviour and adding an info spec for stats. However, this module provides a + simple implementation for stats using [Erlang counters][erl_counters]. + + An info specification `stats` is added to the info data, which is a map + with the following keys or measurements: + + * `:hits` - The requested data is successfully retrieved from the cache. + + * `:misses` - When a system or application makes a request to retrieve + data from a cache, but that specific data is not currently in cache + memory. A cache miss occurs either because the data was never placed + in the cache, or because the data was removed (“evicted”) from the + cache by either the caching system itself or an external application + that specifically made that eviction request. + + * `:evictions` - Eviction by the caching system itself occurs when + space needs to be freed up to add new data to the cache, or if + the time-to-live policy on the data expired. + + * `:expirations` - When the time-to-live policy on the data expired. + + * `:updates` - When existing data is successfully updated. + + * `:writes` - When data is inserted or overwritten. + + * `:deletions` - The data was intentionally removed by either the + caching system or an external application that specifically made + that deletion request. 
+ + See the `Nebulex.Adapters.Local` adapter and `Nebulex.Adapters.Common.Info` + for more information about the usage. + + [erl_counters]: https://erlang.org/doc/man/counters.html + """ + + alias __MODULE__.TelemetryHandler + alias Nebulex.Telemetry + + ## Types & Constants + + @typedoc "The stat type" + @type stat() :: + :hits + | :misses + | :evictions + | :expirations + | :writes + | :updates + | :deletions + + @typedoc "Stats type" + @type stats() :: %{required(stat()) => integer()} + + # Supported stats + @stats [ + :hits, + :misses, + :evictions, + :expirations, + :writes, + :updates, + :deletions + ] + + ## API + + @doc """ + Returns the Erlang's counter to be used by the adapter for keeping the cache + stats. It also initiates the Telemetry handler for handling and/or updating + the cache stats in runtime under the hood. + + Any adapter using `Nebulex.Adapters.Common.Info` implementation must call + this init function in the `c:Nebulex.Adapter.init/1` implementation and + include the returned counter within the adapter metadata under the key + `:stats_counter`. See the `Nebulex.Adapters.Nil` for example. + + ## Example + + Nebulex.Adapters.Common.Info.Stats.init(opts) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec init(keyword()) :: :counters.counters_ref() + def init(opts) do + stats_counter = :counters.new(7, [:write_concurrency]) + + _ = + Telemetry.attach_many( + stats_counter, + [Keyword.fetch!(opts, :telemetry_prefix) ++ [:command, :stop]], + &TelemetryHandler.handle_event/4, + stats_counter + ) + + stats_counter + end + + @doc """ + Increments counter(s) for the given stat(s) by `incr`. 
+ + ## Examples + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, :hits) + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, :writes, 10) + + Nebulex.Adapters.Common.Info.Stats.incr(stats_counter, [:misses, :deletions]) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec incr(:counters.counters_ref(), atom() | [atom()], integer()) :: :ok + def incr(counter, stats, incr \\ 1) + + def incr(ref, :hits, incr), do: :counters.add(ref, 1, incr) + def incr(ref, :misses, incr), do: :counters.add(ref, 2, incr) + def incr(ref, :evictions, incr), do: :counters.add(ref, 3, incr) + def incr(ref, :expirations, incr), do: :counters.add(ref, 4, incr) + def incr(ref, :writes, incr), do: :counters.add(ref, 5, incr) + def incr(ref, :updates, incr), do: :counters.add(ref, 6, incr) + def incr(ref, :deletions, incr), do: :counters.add(ref, 7, incr) + def incr(ref, l, incr) when is_list(l), do: Enum.each(l, &incr(ref, &1, incr)) + + @doc """ + Returns a map with all counters/stats count. + + ## Examples + + Nebulex.Adapters.Common.Info.Stats.count(stats_counter) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. + """ + @spec count(:counters.counters_ref()) :: stats() + def count(ref) do + for s <- @stats, into: %{}, do: {s, count(ref, s)} + end + + @doc """ + Returns the current count for the stats counter given by `stat`. + + ## Examples + + Nebulex.Adapters.Common.Info.Stats.count(stats_counter, :hits) + + > **NOTE:** This function is usually called by the adapter in case it uses + > the default implementation; the adapter should feed the stats counters. 
+ """ + @spec count(:counters.counters_ref(), stat()) :: integer() + def count(ref, stat) + + def count(ref, :hits), do: :counters.get(ref, 1) + def count(ref, :misses), do: :counters.get(ref, 2) + def count(ref, :evictions), do: :counters.get(ref, 3) + def count(ref, :expirations), do: :counters.get(ref, 4) + def count(ref, :writes), do: :counters.get(ref, 5) + def count(ref, :updates), do: :counters.get(ref, 6) + def count(ref, :deletions), do: :counters.get(ref, 7) + + @doc """ + Convenience function for returning a map with all stats set to `0`. + """ + @spec new() :: stats() + def new do + %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } + end +end diff --git a/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex b/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex new file mode 100644 index 00000000..aae8040d --- /dev/null +++ b/lib/nebulex/adapters/common/info/stats/telemetry_handler.ex @@ -0,0 +1,146 @@ +defmodule Nebulex.Adapters.Common.Info.Stats.TelemetryHandler do + @moduledoc """ + Telemetry handler for aggregating cache stats; it relies on the default + `Nebulex.Adapters.Common.Info` implementation based on Erlang counters. + + See `Nebulex.Adapters.Common.Info.Stats`. + """ + + alias Nebulex.Adapters.Common.Info.Stats + + ## Handler + + @doc false + def handle_event( + _event, + _measurements, + %{adapter_meta: %{stats_counter: {:write_concurrency, _} = ref}} = metadata, + ref + ) do + update_stats(metadata) + end + + # coveralls-ignore-start + + def handle_event(_event, _measurements, _metadata, _ref) do + :ok + end + + # coveralls-ignore-stop + + defp update_stats(%{ + command: action, + result: {:error, %Nebulex.KeyError{reason: :expired}}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :take, :ttl, :has_key?] 
do + :ok = Stats.incr(ref, [:misses, :evictions, :expirations, :deletions]) + end + + defp update_stats(%{ + command: action, + result: {:error, %Nebulex.KeyError{reason: :not_found}}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :take, :ttl, :has_key?] do + :ok = Stats.incr(ref, :misses) + end + + defp update_stats(%{ + command: action, + result: {:ok, _}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:fetch, :ttl, :has_key?] do + :ok = Stats.incr(ref, :hits) + end + + defp update_stats(%{ + command: :take, + result: {:ok, _}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, [:hits, :deletions]) + end + + defp update_stats(%{ + command: :put, + args: [_, _, _, :replace, _], + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :updates) + end + + defp update_stats(%{ + command: :put, + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :writes) + end + + defp update_stats(%{ + command: :put_all, + result: {:ok, true}, + args: [entries | _], + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :writes, Enum.count(entries)) + end + + defp update_stats(%{ + command: :delete, + result: :ok, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :deletions) + end + + defp update_stats(%{ + command: :execute, + args: [:get_all, {:in, keys} | _], + result: {:ok, list}, + adapter_meta: %{stats_counter: ref} + }) do + len = length(list) + + :ok = Stats.incr(ref, :hits, len) + :ok = Stats.incr(ref, :misses, Enum.count(keys) - len) + end + + defp update_stats(%{ + command: :execute, + args: [:delete_all | _], + result: {:ok, result}, + adapter_meta: %{stats_counter: ref} + }) do + :ok = Stats.incr(ref, :deletions, result) + end + + defp update_stats(%{ + command: action, + result: {:ok, true}, + adapter_meta: %{stats_counter: ref} + }) + when action in [:expire, :touch] do + :ok = Stats.incr(ref, :updates) 
+ end + + defp update_stats(%{ + command: :update_counter, + args: [_, amount, _, default, _], + result: {:ok, result}, + adapter_meta: %{stats_counter: ref} + }) do + offset = if amount >= 0, do: -1, else: 1 + + if result + amount * offset === default do + :ok = Stats.incr(ref, :writes) + else + :ok = Stats.incr(ref, :updates) + end + end + + defp update_stats(_), do: :ok +end diff --git a/lib/nebulex/adapters/local.ex b/lib/nebulex/adapters/local.ex deleted file mode 100644 index 409fd145..00000000 --- a/lib/nebulex/adapters/local.ex +++ /dev/null @@ -1,1010 +0,0 @@ -defmodule Nebulex.Adapters.Local do - @moduledoc ~S""" - Adapter module for Local Generational Cache; inspired by - [epocxy](https://github.com/duomark/epocxy). - - Generational caching using an ets table (or multiple ones when used with - `:shards`) for each generation of cached data. Accesses hit the newer - generation first, and migrate from the older generation to the newer - generation when retrieved from the stale table. When a new generation - is started, the oldest one is deleted. This is a form of mass garbage - collection which avoids using timers and expiration of individual - cached elements. - - This implementation of generation cache uses only two generations - (which is more than enough) also referred like the `newer` and - the `older`. - - ## Overall features - - * Configurable backend (`ets` or `:shards`). - * Expiration – A status based on TTL (Time To Live) option. To maintain - cache performance, expired entries may not be immediately removed or - evicted, they are expired or evicted on-demand, when the key is read. - * Eviction – [Generational Garbage Collection][gc]. - * Sharding – For intensive workloads, the Cache may also be partitioned - (by using `:shards` backend and specifying the `:partitions` option). - * Support for transactions via Erlang global name registration facility. - * Support for stats. 
- - [gc]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.Generation.html - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:backend` - Defines the backend or storage to be used for the adapter. - Supported backends are: `:ets` and `:shards`. Defaults to `:ets`. - - * `:read_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:write_concurrency` - (boolean) Since this adapter uses ETS tables - internally, this option is used when a new table is created; see - `:ets.new/2`. Defaults to `true`. - - * `:compressed` - (boolean) This option is used when a new ETS table is - created and it defines whether or not it includes X as an option; see - `:ets.new/2`. Defaults to `false`. - - * `:backend_type` - This option defines the type of ETS to be used - (Defaults to `:set`). However, it is highly recommended to keep the - default value, since there are commands not supported (unexpected - exception may be raised) for types like `:bag` or `: duplicate_bag`. - Please see the [ETS](https://erlang.org/doc/man/ets.html) docs - for more information. - - * `:partitions` - If it is set, an integer > 0 is expected, otherwise, - it defaults to `System.schedulers_online()`. This option is only - available for `:shards` backend. - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). If it is not set (`nil`), - the check to release memory is not performed (the default). 
- - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes allocated for a cache generation. When this option - is set and the configured value is reached, a new cache generation is - created so the oldest is deleted and force releasing memory space. - If it is not set (`nil`), the cleanup check to release memory is - not performed (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). - - * `:gc_flush_delay` - If it is set, an integer > 0 is expected defining the - delay in milliseconds before objects from the oldest generation are - flushed. Defaults to `10_000` (10 seconds). - - ## Usage - - `Nebulex.Cache` is the wrapper around the cache. We can define a - local cache as follows: - - defmodule MyApp.LocalCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - end - - Where the configuration for the cache must be in your application - environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10) - - For intensive workloads, the Cache may also be partitioned using `:shards` - as cache backend (`backend: :shards`) and configuring the desired number of - partitions via the `:partitions` option. Defaults to - `System.schedulers_online()`. 
- - config :my_app, MyApp.LocalCache, - gc_interval: :timer.hours(12), - max_size: 1_000_000, - allocated_memory: 2_000_000_000, - gc_cleanup_min_timeout: :timer.seconds(10), - gc_cleanup_max_timeout: :timer.minutes(10), - backend: :shards, - partitions: System.schedulers_online() * 2 - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.LocalCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Eviction configuration - - This section is to understand a bit better how the different configuration - options work and have an idea what values to set; especially if it is the - first time using Nebulex. - - ### `:ttl` option - - The `:ttl` option that is used to set the expiration time for a key, it - doesn't work as eviction mechanism, since the local adapter implements a - generational cache, the options that control the eviction process are: - `:gc_interval`, `:gc_cleanup_min_timeout`, `:gc_cleanup_max_timeout`, - `:max_size` and `:allocated_memory`. The `:ttl` is evaluated on-demand - when a key is retrieved, and at that moment if it s expired, then remove - it from the cache, hence, it can not be used as eviction method, it is - more for keep the integrity and consistency in the cache. For this reason, - it is highly recommended to configure always the eviction options mentioned - before. - - ### Caveats when using `:ttl` option: - - * When using the `:ttl` option, ensure it is less than `:gc_interval`, - otherwise, there may be a situation where the key is evicted and the - `:ttl` hasn't happened yet (maybe because the garbage collector ran - before the key had been fetched). 
- * Assuming you have `:gc_interval` set to 2 hrs, then you put a new key - with `:ttl` set to 1 hr, and 1 minute later the GC runs, that key will - be moved to the older generation so it can be yet retrieved. On the other - hand, if the key is never fetched till the next GC cycle (causing moving - it to the newer generation), since the key is already in the oldest - generation it will be evicted from the cache so it won't be retrievable - anymore. - - ### Garbage collection or eviction options - - This adapter implements a generational cache, which means its main eviction - mechanism is pushing a new cache generation and remove the oldest one. In - this way, we ensure only the most frequently used keys are always available - in the newer generation and the the least frequently used are evicted when - the garbage collector runs, and the garbage collector is triggered upon - these conditions: - - * When the time interval defined by `:gc_interval` is completed. - This makes the garbage-collector process to run creating a new - generation and forcing to delete the oldest one. - * When the "cleanup" timeout expires, and then the limits `:max_size` - and `:allocated_memory` are checked, if one of those is reached, - then the garbage collector runs (a new generation is created and - the oldest one is deleted). The cleanup timeout is controlled by - `:gc_cleanup_min_timeout` and `:gc_cleanup_max_timeout`, it works - with an inverse linear backoff, which means the timeout is inverse - proportional to the memory growth; the bigger the cache size is, - the shorter the cleanup timeout will be. - - ### First-time configuration - - For configuring the cache with accurate and/or good values it is important - to know several things in advance, like for example the size of an entry - in average so we can calculate a good value for max size and/or allocated - memory, how intensive will be the load in terms of reads and writes, etc. 
- The problem is most of these aspects are unknown when it is a new app or - we are using the cache for the first time. Therefore, the following - recommendations will help you to configure the cache for the first time: - - * When configuring the `:gc_interval`, think about how that often the - least frequently used entries should be evicted, or what is the desired - retention period for the cached entries. For example, if `:gc_interval` - is set to 1 hr, it means you will keep in cache only those entries that - are retrieved periodically within a 2 hr period; `gc_interval * 2`, - being 2 the number of generations. Longer than that, the GC will - ensure is always evicted (the oldest generation is always deleted). - If it is the first time using Nebulex, perhaps you can start with - `gc_interval: :timer.hours(12)` (12 hrs), so the max retention - period for the keys will be 1 day; but ensure you also set either the - `:max_size` or `:allocated_memory`. - * It is highly recommended to set either `:max_size` or `:allocated_memory` - to ensure the oldest generation is deleted (least frequently used keys - are evicted) when one of these limits is reached and also to avoid - running out of memory. For example, for the `:allocated_memory` we can - set 25% of the total memory, and for the `:max_size` something between - `100_000` and `1_000_000`. - * For `:gc_cleanup_min_timeout` we can set `10_000`, which means when the - cache is reaching the size or memory limit, the polling period for the - cleanup process will be 10 seconds. And for `:gc_cleanup_max_timeout` - we can set `600_000`, which means when the cache is almost empty the - polling period will be close to 10 minutes. - - ## Stats - - This adapter does support stats by using the default implementation - provided by `Nebulex.Adapter.Stats`. The adapter also uses the - `Nebulex.Telemetry.StatsHandler` to aggregate the stats and keep - them updated. 
Therefore, it requires the Telemetry events are emitted - by the adapter (the `:telemetry` option should not be set to `false` - so the Telemetry events can be dispatched), otherwise, stats won't - work properly. - - ## Queryable API - - Since this adapter is implemented on top of ETS tables, the query must be - a valid match spec given by `:ets.match_spec()`. However, there are some - predefined and/or shorthand queries you can use. See the section - "Predefined queries" below for for information. - - Internally, an entry is represented by the tuple - `{:entry, key, value, touched, ttl}`, which means the match pattern within - the `:ets.match_spec()` must be something like: - `{:entry, :"$1", :"$2", :"$3", :"$4"}`. - In order to make query building easier, you can use `Ex2ms` library. - - ### Predefined queries - - * `nil` - All keys are returned. - - * `:unexpired` - All unexpired keys/entries. - - * `:expired` - All expired keys/entries. - - * `{:in, [term]}` - Only the keys in the given key list (`[term]`) - are returned. This predefined query is only supported for - `c:Nebulex.Cache.delete_all/2`. This is the recommended - way of doing bulk delete of keys. - - ## Examples - - # built-in queries - MyCache.all() - MyCache.all(:unexpired) - MyCache.all(:expired) - MyCache.all({:in, ["foo", "bar"]}) - - # using a custom match spec (all values > 10) - spec = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}] - MyCache.all(spec) - - # using Ex2ms - import Ex2ms - - spec = - fun do - {_, key, value, _, _} when value > 10 -> {key, value} - end - - MyCache.all(spec) - - The `:return` option applies only for built-in queries, such as: - `nil | :unexpired | :expired`, if you are using a custom `:ets.match_spec()`, - the return value depends on it. - - The same applies to the `stream` function. - - ## Extended API (convenience functions) - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Creating new generations: - - MyCache.new_generation() - MyCache.new_generation(reset_timer: false) - - Retrieving the current generations: - - MyCache.generations() - - Retrieving the newer generation: - - MyCache.newer_generation() - - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default stats implementation - use Nebulex.Adapter.Stats - - import Nebulex.Adapter - import Nebulex.Helpers - import Record - - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local.{Backend, Generation, Metadata} - alias Nebulex.{Entry, Time} - - # Cache Entry - defrecord(:entry, - key: nil, - value: nil, - touched: nil, - ttl: nil - ) - - # Supported Backends - @backends ~w(ets shards)a - - # Inline common instructions - @compile {:inline, list_gen: 1, newer_gen: 1, test_ms: 0} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function for creating new generations. - """ - def new_generation(opts \\ []) do - Generation.new(get_dynamic_cache(), opts) - end - - @doc """ - A convenience function for reset the GC timer. - """ - def reset_generation_timer do - Generation.reset_timer(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the current generations. - """ - def generations do - Generation.list(get_dynamic_cache()) - end - - @doc """ - A convenience function for retrieving the newer generation. 
- """ - def newer_generation do - Generation.newer(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - cache = Keyword.fetch!(opts, :cache) - telemetry = Keyword.fetch!(opts, :telemetry) - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - - # Init internal metadata table - meta_tab = opts[:meta_tab] || Metadata.init() - - # Init stats_counter - stats_counter = Stats.init(opts) - - # Resolve the backend to be used - backend = - opts - |> Keyword.get(:backend, :ets) - |> case do - val when val in @backends -> - val - - val -> - raise "expected backend: option to be one of the supported backends " <> - "#{inspect(@backends)}, got: #{inspect(val)}" - end - - # Internal option for max nested match specs based on number of keys - purge_batch_size = - get_option( - opts, - :purge_batch_size, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - 100 - ) - - # Build adapter metadata - adapter_meta = %{ - cache: cache, - telemetry: telemetry, - telemetry_prefix: telemetry_prefix, - meta_tab: meta_tab, - stats_counter: stats_counter, - backend: backend, - purge_batch_size: purge_batch_size, - started_at: DateTime.utc_now() - } - - # Build adapter child_spec - child_spec = Backend.child_spec(backend, [adapter_meta: adapter_meta] ++ opts) - - {:ok, child_spec, adapter_meta} - end - - ## Nebulex.Adapter.Entry - - @impl true - def get(adapter_meta, key, _opts) do - adapter_meta - |> get_(key) - |> handle_expired() - end - - defspan get_(adapter_meta, key), as: :get do - adapter_meta.meta_tab - |> list_gen() - |> do_get(key, adapter_meta.backend) - |> return(:value) - end - - defp do_get([newer], key, backend) do - gen_fetch(newer, key, backend) - end - - defp do_get([newer, older], key, backend) do - with nil <- gen_fetch(newer, key, backend), - entry(key: ^key) = cached <- gen_fetch(older, key, backend, &pop_entry/4) do - true = backend.insert(newer, cached) - cached - end - end - - defp gen_fetch(gen, key, backend, fun \\ 
&get_entry/4) do - gen - |> fun.(key, nil, backend) - |> validate_ttl(gen, backend) - end - - @impl true - defspan get_all(adapter_meta, keys, _opts) do - adapter_meta = %{adapter_meta | telemetry: Map.get(adapter_meta, :in_span?, false)} - - Enum.reduce(keys, %{}, fn key, acc -> - case get(adapter_meta, key, []) do - nil -> acc - obj -> Map.put(acc, key, obj) - end - end) - end - - @impl true - defspan put(adapter_meta, key, value, ttl, on_write, _opts) do - do_put( - on_write, - adapter_meta.meta_tab, - adapter_meta.backend, - entry( - key: key, - value: value, - touched: Time.now(), - ttl: ttl - ) - ) - end - - defp do_put(:put, meta_tab, backend, entry) do - put_entries(meta_tab, backend, entry) - end - - defp do_put(:put_new, meta_tab, backend, entry) do - put_new_entries(meta_tab, backend, entry) - end - - defp do_put(:replace, meta_tab, backend, entry(key: key, value: value)) do - update_entry(meta_tab, backend, key, [{3, value}]) - end - - @impl true - defspan put_all(adapter_meta, entries, ttl, on_write, _opts) do - entries = - for {key, value} <- entries, value != nil do - entry(key: key, value: value, touched: Time.now(), ttl: ttl) - end - - do_put_all( - on_write, - adapter_meta.meta_tab, - adapter_meta.backend, - adapter_meta.purge_batch_size, - entries - ) - end - - defp do_put_all(:put, meta_tab, backend, batch_size, entries) do - put_entries(meta_tab, backend, entries, batch_size) - end - - defp do_put_all(:put_new, meta_tab, backend, batch_size, entries) do - put_new_entries(meta_tab, backend, entries, batch_size) - end - - @impl true - defspan delete(adapter_meta, key, _opts) do - adapter_meta.meta_tab - |> list_gen() - |> Enum.each(&adapter_meta.backend.delete(&1, key)) - end - - @impl true - def take(adapter_meta, key, _opts) do - adapter_meta - |> take_(key) - |> handle_expired() - end - - defspan take_(adapter_meta, key), as: :take do - adapter_meta.meta_tab - |> list_gen() - |> Enum.reduce_while(nil, fn gen, acc -> - case pop_entry(gen, key, 
nil, adapter_meta.backend) do - nil -> - {:cont, acc} - - res -> - value = - res - |> validate_ttl(gen, adapter_meta.backend) - |> return(:value) - - {:halt, value} - end - end) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, ttl, default, _opts) do - # Get needed metadata - meta_tab = adapter_meta.meta_tab - backend = adapter_meta.backend - - # Verify if the key has expired - _ = - meta_tab - |> list_gen() - |> do_get(key, backend) - - # Run the counter operation - meta_tab - |> newer_gen() - |> backend.update_counter( - key, - {3, amount}, - entry(key: key, value: default, touched: Time.now(), ttl: ttl) - ) - end - - @impl true - def has_key?(adapter_meta, key) do - case get(adapter_meta, key, []) do - nil -> false - _ -> true - end - end - - @impl true - defspan ttl(adapter_meta, key) do - adapter_meta.meta_tab - |> list_gen() - |> do_get(key, adapter_meta.backend) - |> return() - |> entry_ttl() - end - - defp entry_ttl(nil), do: nil - defp entry_ttl(:"$expired"), do: nil - defp entry_ttl(entry(ttl: :infinity)), do: :infinity - - defp entry_ttl(entry(ttl: ttl, touched: touched)) do - ttl - (Time.now() - touched) - end - - defp entry_ttl(entries) when is_list(entries) do - for entry <- entries, do: entry_ttl(entry) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - update_entry(adapter_meta.meta_tab, adapter_meta.backend, key, [{4, Time.now()}, {5, ttl}]) - end - - @impl true - defspan touch(adapter_meta, key) do - update_entry(adapter_meta.meta_tab, adapter_meta.backend, key, [{4, Time.now()}]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, :count_all, nil, _opts) do - meta_tab - |> list_gen() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:size) - |> Kernel.+(acc) - end) - end - - defp do_execute(%{meta_tab: meta_tab}, 
:delete_all, nil, _opts) do - Generation.delete_all(meta_tab) - end - - defp do_execute(%{meta_tab: meta_tab} = adapter_meta, :delete_all, {:in, keys}, _opts) - when is_list(keys) do - meta_tab - |> list_gen() - |> Enum.reduce(0, fn gen, acc -> - do_delete_all(adapter_meta.backend, gen, keys, adapter_meta.purge_batch_size) + acc - end) - end - - defp do_execute(%{meta_tab: meta_tab, backend: backend}, operation, query, opts) do - query = - query - |> validate_match_spec(opts) - |> maybe_match_spec_return_true(operation) - - {reducer, acc_in} = - case operation do - :all -> {&(backend.select(&1, query) ++ &2), []} - :count_all -> {&(backend.select_count(&1, query) + &2), 0} - :delete_all -> {&(backend.select_delete(&1, query) + &2), 0} - end - - meta_tab - |> list_gen() - |> Enum.reduce(acc_in, reducer) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - query - |> validate_match_spec(opts) - |> do_stream(adapter_meta, Keyword.get(opts, :page_size, 20)) - end - - defp do_stream(match_spec, %{meta_tab: meta_tab, backend: backend}, page_size) do - Stream.resource( - fn -> - [newer | _] = generations = list_gen(meta_tab) - result = backend.select(newer, match_spec, page_size) - {result, generations} - end, - fn - {:"$end_of_table", [_gen]} -> - {:halt, []} - - {:"$end_of_table", [_gen | generations]} -> - result = - generations - |> hd() - |> backend.select(match_spec, page_size) - - {[], {result, generations}} - - {{elements, cont}, [_ | _] = generations} -> - {elements, {backend.select(cont), generations}} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, opts, fun) - end - - @impl true - defspan in_transaction?(adapter_meta) 
do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - if stats = super(adapter_meta) do - %{stats | metadata: Map.put(stats.metadata, :started_at, adapter_meta.started_at)} - end - end - - ## Helpers - - defp list_gen(meta_tab) do - Metadata.fetch!(meta_tab, :generations) - end - - defp newer_gen(meta_tab) do - meta_tab - |> Metadata.fetch!(:generations) - |> hd() - end - - defp get_entry(tab, key, default, backend) do - case backend.lookup(tab, key) do - [] -> default - [entry] -> entry - entries -> entries - end - end - - defp pop_entry(tab, key, default, backend) do - case backend.take(tab, key) do - [] -> default - [entry] -> entry - entries -> entries - end - end - - defp put_entries(meta_tab, backend, entries, batch_size \\ 0) - - defp put_entries(meta_tab, backend, entries, batch_size) when is_list(entries) do - do_put_entries(meta_tab, backend, entries, fn older_gen -> - keys = Enum.map(entries, fn entry(key: key) -> key end) - - do_delete_all(backend, older_gen, keys, batch_size) - end) - end - - defp put_entries(meta_tab, backend, entry(key: key) = entry, _batch_size) do - do_put_entries(meta_tab, backend, entry, fn older_gen -> - true = backend.delete(older_gen, key) - end) - end - - defp do_put_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - _ = purge_fun.(older_gen) - - backend.insert(newer_gen, entry_or_entries) - end - end - - defp put_new_entries(meta_tab, backend, entries, batch_size \\ 0) - - defp put_new_entries(meta_tab, backend, entries, batch_size) when is_list(entries) do - do_put_new_entries(meta_tab, backend, entries, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entries) do - keys = Enum.map(entries, fn entry(key: key) -> key end) - - _ = do_delete_all(backend, older_gen, keys, batch_size) - - backend.insert_new(newer_gen, 
entries) - end - end) - end - - defp put_new_entries(meta_tab, backend, entry(key: key) = entry, _batch_size) do - do_put_new_entries(meta_tab, backend, entry, fn newer_gen, older_gen -> - with true <- backend.insert_new(older_gen, entry) do - true = backend.delete(older_gen, key) - - backend.insert_new(newer_gen, entry) - end - end) - end - - defp do_put_new_entries(meta_tab, backend, entry_or_entries, purge_fun) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.insert_new(newer_gen, entry_or_entries) - - [newer_gen, older_gen] -> - purge_fun.(newer_gen, older_gen) - end - end - - defp update_entry(meta_tab, backend, key, updates) do - case list_gen(meta_tab) do - [newer_gen] -> - backend.update_element(newer_gen, key, updates) - - [newer_gen, older_gen] -> - with false <- backend.update_element(newer_gen, key, updates), - entry() = entry <- pop_entry(older_gen, key, false, backend) do - entry = - Enum.reduce(updates, entry, fn - {3, value}, acc -> entry(acc, value: value) - {4, value}, acc -> entry(acc, touched: value) - {5, value}, acc -> entry(acc, ttl: value) - end) - - backend.insert(newer_gen, entry) - end - end - end - - defp do_delete_all(backend, tab, keys, batch_size) do - do_delete_all(backend, tab, keys, batch_size, 0) - end - - defp do_delete_all(backend, tab, [key], _batch_size, deleted) do - true = backend.delete(tab, key) - - deleted + 1 - end - - defp do_delete_all(backend, tab, [k1, k2 | keys], batch_size, deleted) do - k1 = if is_tuple(k1), do: tuple_to_match_spec(k1), else: k1 - k2 = if is_tuple(k2), do: tuple_to_match_spec(k2), else: k2 - - do_delete_all( - backend, - tab, - keys, - batch_size, - deleted, - 2, - {:orelse, {:==, :"$1", k1}, {:==, :"$1", k2}} - ) - end - - defp do_delete_all(backend, tab, [], _batch_size, deleted, _count, acc) do - backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - end - - defp do_delete_all(backend, tab, keys, batch_size, deleted, count, acc) - when count >= batch_size do - deleted = 
backend.select_delete(tab, delete_all_match_spec(acc)) + deleted - - do_delete_all(backend, tab, keys, batch_size, deleted) - end - - defp do_delete_all(backend, tab, [k | keys], batch_size, deleted, count, acc) do - k = if is_tuple(k), do: tuple_to_match_spec(k), else: k - - do_delete_all( - backend, - tab, - keys, - batch_size, - deleted, - count + 1, - {:orelse, acc, {:==, :"$1", k}} - ) - end - - defp tuple_to_match_spec(data) do - data - |> :erlang.tuple_to_list() - |> tuple_to_match_spec([]) - end - - defp tuple_to_match_spec([], acc) do - {acc |> Enum.reverse() |> :erlang.list_to_tuple()} - end - - defp tuple_to_match_spec([e | tail], acc) do - e = if is_tuple(e), do: tuple_to_match_spec(e), else: e - - tuple_to_match_spec(tail, [e | acc]) - end - - defp return(entry_or_entries, field \\ nil) - - defp return(nil, _field), do: nil - defp return(:"$expired", _field), do: :"$expired" - defp return(entry(value: value), :value), do: value - defp return(entry(key: _) = entry, _field), do: entry - - defp return(entries, field) when is_list(entries) do - Enum.map(entries, &return(&1, field)) - end - - defp validate_ttl(nil, _, _), do: nil - defp validate_ttl(entry(ttl: :infinity) = entry, _, _), do: entry - - defp validate_ttl(entry(key: key, touched: touched, ttl: ttl) = entry, gen, backend) do - if Time.now() - touched >= ttl do - true = backend.delete(gen, key) - :"$expired" - else - entry - end - end - - defp validate_ttl(entries, gen, backend) when is_list(entries) do - Enum.filter(entries, fn entry -> - not is_nil(validate_ttl(entry, gen, backend)) - end) - end - - defp handle_expired(:"$expired"), do: nil - defp handle_expired(result), do: result - - defp validate_match_spec(spec, opts) when spec in [nil, :unexpired, :expired] do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"), - if(spec = comp_match_spec(spec), do: [spec], else: []), - ret_match_spec(opts) - } - ] - end - - defp validate_match_spec(spec, _opts) do - case 
:ets.test_ms(test_ms(), spec) do - {:ok, _result} -> - spec - - {:error, _result} -> - raise Nebulex.QueryError, message: "invalid match spec", query: spec - end - end - - defp comp_match_spec(nil), - do: nil - - defp comp_match_spec(:unexpired), - do: {:orelse, {:==, :"$4", :infinity}, {:<, {:-, Time.now(), :"$3"}, :"$4"}} - - defp comp_match_spec(:expired), - do: {:not, comp_match_spec(:unexpired)} - - defp ret_match_spec(opts) do - case Keyword.get(opts, :return, :key) do - :key -> [:"$1"] - :value -> [:"$2"] - {:key, :value} -> [{{:"$1", :"$2"}}] - :entry -> [%Entry{key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"}] - end - end - - defp maybe_match_spec_return_true([{pattern, conds, _ret}], operation) - when operation in [:delete_all, :count_all] do - [{pattern, conds, [true]}] - end - - defp maybe_match_spec_return_true(match_spec, _operation) do - match_spec - end - - defp delete_all_match_spec(conds) do - [ - { - entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"), - [conds], - [true] - } - ] - end - - defp test_ms, do: entry(key: 1, value: 1, touched: Time.now(), ttl: 1000) -end diff --git a/lib/nebulex/adapters/local/backend.ex b/lib/nebulex/adapters/local/backend.ex deleted file mode 100644 index abea8f88..00000000 --- a/lib/nebulex/adapters/local/backend.ex +++ /dev/null @@ -1,81 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend do - @moduledoc false - - @doc false - defmacro __using__(_opts) do - quote do - import Nebulex.Helpers - - alias Nebulex.Adapters.Local.Generation - - defp generation_spec(opts) do - %{ - id: Module.concat([__MODULE__, GC]), - start: {Generation, :start_link, [opts]} - } - end - - defp sup_spec(children) do - %{ - id: Module.concat([__MODULE__, Supervisor]), - start: {Supervisor, :start_link, [children, [strategy: :one_for_all]]}, - type: :supervisor - } - end - - defp parse_opts(opts, extra \\ []) do - type = get_option(opts, :backend_type, "an atom", &is_atom/1, :set) - - compressed = - case get_option(opts, 
:compressed, "boolean", &is_boolean/1, false) do - true -> [:compressed] - false -> [] - end - - backend_opts = - [ - type, - :public, - {:keypos, 2}, - {:read_concurrency, - get_option(opts, :read_concurrency, "boolean", &is_boolean/1, true)}, - {:write_concurrency, - get_option(opts, :write_concurrency, "boolean", &is_boolean/1, true)}, - compressed, - extra - ] - |> List.flatten() - |> Enum.filter(&(&1 != :named_table)) - - Keyword.put(opts, :backend_opts, backend_opts) - end - end - end - - @doc """ - Helper function for returning the child spec for the given backend. - """ - def child_spec(backend, opts) do - get_mod(backend).child_spec(opts) - end - - @doc """ - Helper function for creating a new table for the given backend. - """ - def new(backend, meta_tab, tab_opts) do - get_mod(backend).new(meta_tab, tab_opts) - end - - @doc """ - Helper function for deleting a table for the given backend. - """ - def delete(backend, meta_tab, gen_tab) do - get_mod(backend).delete(meta_tab, gen_tab) - end - - defp get_mod(:ets), do: Nebulex.Adapters.Local.Backend.ETS - - if Code.ensure_loaded?(:shards) do - defp get_mod(:shards), do: Nebulex.Adapters.Local.Backend.Shards - end -end diff --git a/lib/nebulex/adapters/local/backend/ets.ex b/lib/nebulex/adapters/local/backend/ets.ex deleted file mode 100644 index d552447d..00000000 --- a/lib/nebulex/adapters/local/backend/ets.ex +++ /dev/null @@ -1,25 +0,0 @@ -defmodule Nebulex.Adapters.Local.Backend.ETS do - @moduledoc false - use Nebulex.Adapters.Local.Backend - - ## API - - @doc false - def child_spec(opts) do - opts - |> parse_opts() - |> generation_spec() - |> List.wrap() - |> sup_spec() - end - - @doc false - def new(_meta_tab, tab_opts) do - :ets.new(__MODULE__, tab_opts) - end - - @doc false - def delete(_meta_tab, gen_tab) do - :ets.delete(gen_tab) - end -end diff --git a/lib/nebulex/adapters/local/backend/shards.ex b/lib/nebulex/adapters/local/backend/shards.ex deleted file mode 100644 index 949863a5..00000000 --- 
a/lib/nebulex/adapters/local/backend/shards.ex +++ /dev/null @@ -1,87 +0,0 @@ -if Code.ensure_loaded?(:shards) do - defmodule Nebulex.Adapters.Local.Backend.Shards do - @moduledoc false - - defmodule __MODULE__.DynamicSupervisor do - @moduledoc false - use DynamicSupervisor - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def start_link(tab) do - DynamicSupervisor.start_link(__MODULE__, tab) - end - - ## DynamicSupervisor Callbacks - - @impl true - def init(meta_tab) do - :ok = Metadata.put(meta_tab, :shards_sup, self()) - DynamicSupervisor.init(strategy: :one_for_one) - end - end - - use Nebulex.Adapters.Local.Backend - - alias Nebulex.Adapters.Local.Metadata - - ## API - - @doc false - def child_spec(opts) do - partitions = - get_option( - opts, - :partitions, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - System.schedulers_online() - ) - - meta_tab = - opts - |> Keyword.fetch!(:adapter_meta) - |> Map.fetch!(:meta_tab) - - sup_spec([ - {__MODULE__.DynamicSupervisor, meta_tab}, - generation_spec(parse_opts(opts, partitions: partitions)) - ]) - end - - @doc false - def new(meta_tab, tab_opts) do - {:ok, _pid, tab} = - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.start_child(table_spec(tab_opts)) - - tab - end - - @doc false - def delete(meta_tab, gen_tab) do - meta_tab - |> Metadata.get(:shards_sup) - |> DynamicSupervisor.terminate_child(:shards_meta.tab_pid(gen_tab)) - end - - @doc false - def start_table(opts) do - tab = :shards.new(__MODULE__, opts) - pid = :shards_meta.tab_pid(tab) - {:ok, pid, tab} - end - - defp table_spec(opts) do - %{ - id: __MODULE__, - start: {__MODULE__, :start_table, [opts]}, - type: :supervisor - } - end - end -end diff --git a/lib/nebulex/adapters/local/generation.ex b/lib/nebulex/adapters/local/generation.ex deleted file mode 100644 index b986cbd5..00000000 --- a/lib/nebulex/adapters/local/generation.ex +++ /dev/null @@ -1,591 +0,0 @@ -defmodule Nebulex.Adapters.Local.Generation do - 
@moduledoc """ - Generational garbage collection process. - - The generational garbage collector manage the heap as several sub-heaps, - known as generations, based on age of the objects. An object is allocated - in the youngest generation, sometimes called the nursery, and is promoted - to an older generation if its lifetime exceeds the threshold of its current - generation (defined by option `:gc_interval`). Every time the GC runs - (triggered by `:gc_interval` timeout), a new cache generation is created - and the oldest one is deleted. - - The deletion of the oldest generation happens in two steps. First, the - underlying ets table is flushed to release space and only marked for deletion - as there may still be processes referencing it. The actual deletion of the - ets table happens at next GC run. - - However, flushing is a blocking operation, once started, processes wanting - to access the table will need to wait until it finishes. To circumvent this, - flushing can be delayed by configuring `:gc_flush_delay` to allow time for - these processes to finish their work without being accidentally blocked. - - The only way to create new generations is through this module (this server - is the metadata owner) calling `new/2` function. When a Cache is created, - a generational garbage collector is attached to it automatically, - therefore, this server MUST NOT be started directly. - - ## Options - - These options are configured through the `Nebulex.Adapters.Local` adapter: - - * `:gc_interval` - If it is set, an integer > 0 is expected defining the - interval time in milliseconds to garbage collection to run, delete the - oldest generation and create a new one. If this option is not set, - garbage collection is never executed, so new generations must be - created explicitly, e.g.: `MyCache.new_generation(opts)`. - - * `:max_size` - If it is set, an integer > 0 is expected defining the - max number of cached entries (cache limit). 
If it is not set (`nil`), - the check to release memory is not performed (the default). - - * `:allocated_memory` - If it is set, an integer > 0 is expected defining - the max size in bytes allocated for a cache generation. When this option - is set and the configured value is reached, a new cache generation is - created so the oldest is deleted and force releasing memory space. - If it is not set (`nil`), the cleanup check to release memory is - not performed (the default). - - * `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in - milliseconds for triggering the next cleanup and memory check. This will - be the timeout to use when either the max size or max allocated memory - is reached. Defaults to `10_000` (10 seconds). - - * `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in - milliseconds for triggering the next cleanup and memory check. This is - the timeout used when the cache starts and there are few entries or the - consumed memory is near to `0`. Defaults to `600_000` (10 minutes). - - * `:gc_flush_delay` - If it is set, an integer > 0 is expected defining the - delay in milliseconds before objects from the oldest generation are - flushed. Defaults to `10_000` (10 seconds). 
- - """ - - # State - defstruct [ - :cache, - :name, - :telemetry, - :telemetry_prefix, - :meta_tab, - :backend, - :backend_opts, - :stats_counter, - :gc_interval, - :gc_heartbeat_ref, - :max_size, - :allocated_memory, - :gc_cleanup_min_timeout, - :gc_cleanup_max_timeout, - :gc_cleanup_ref, - :gc_flush_delay - ] - - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.Adapter.Stats - alias Nebulex.Adapters.Local - alias Nebulex.Adapters.Local.{Backend, Metadata} - alias Nebulex.Telemetry - alias Nebulex.Telemetry.StatsHandler - - @type t :: %__MODULE__{} - @type server_ref :: pid | atom | :ets.tid() - @type opts :: Nebulex.Cache.opts() - - ## API - - @doc """ - Starts the garbage collector for the built-in local cache adapter. - """ - @spec start_link(opts) :: GenServer.on_start() - def start_link(opts) do - GenServer.start_link(__MODULE__, opts) - end - - @doc """ - Creates a new cache generation. Once the max number of generations - is reached, when a new generation is created, the oldest one is - deleted. - - ## Options - - * `:reset_timer` - Indicates if the poll frequency time-out should - be reset or not (default: true). - - ## Example - - Nebulex.Adapters.Local.Generation.new(MyCache) - - Nebulex.Adapters.Local.Generation.new(MyCache, reset_timer: false) - """ - @spec new(server_ref, opts) :: [atom] - def new(server_ref, opts \\ []) do - reset_timer? = get_option(opts, :reset_timer, "boolean", &is_boolean/1, true) - do_call(server_ref, {:new_generation, reset_timer?}) - end - - @doc """ - Removes or flushes all entries from the cache (including all its generations). - - ## Example - - Nebulex.Adapters.Local.Generation.delete_all(MyCache) - """ - @spec delete_all(server_ref) :: integer - def delete_all(server_ref) do - do_call(server_ref, :delete_all) - end - - @doc """ - Reallocates the block of memory that was previously allocated for the given - `server_ref` with the new `size`. 
In other words, reallocates the max memory - size for a cache generation. - - ## Example - - Nebulex.Adapters.Local.Generation.realloc(MyCache, 1_000_000) - """ - @spec realloc(server_ref, pos_integer) :: :ok - def realloc(server_ref, size) do - do_call(server_ref, {:realloc, size}) - end - - @doc """ - Returns the memory info in a tuple form `{used_mem, total_mem}`. - - ## Example - - Nebulex.Adapters.Local.Generation.memory_info(MyCache) - """ - @spec memory_info(server_ref) :: {used_mem :: non_neg_integer, total_mem :: non_neg_integer} - def memory_info(server_ref) do - do_call(server_ref, :memory_info) - end - - @doc """ - Resets the timer for pushing new cache generations. - - ## Example - - Nebulex.Adapters.Local.Generation.reset_timer(MyCache) - """ - def reset_timer(server_ref) do - server_ref - |> server() - |> GenServer.cast(:reset_timer) - end - - @doc """ - Returns the list of the generations in the form `[newer, older]`. - - ## Example - - Nebulex.Adapters.Local.Generation.list(MyCache) - """ - @spec list(server_ref) :: [:ets.tid()] - def list(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - end - - @doc """ - Returns the newer generation. - - ## Example - - Nebulex.Adapters.Local.Generation.newer(MyCache) - """ - @spec newer(server_ref) :: :ets.tid() - def newer(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.get(:generations, []) - |> hd() - end - - @doc """ - Returns the PID of the GC server for the given `server_ref`. - - ## Example - - Nebulex.Adapters.Local.Generation.server(MyCache) - """ - @spec server(server_ref) :: pid - def server(server_ref) do - server_ref - |> get_meta_tab() - |> Metadata.fetch!(:gc_pid) - end - - @doc """ - A convenience function for retrieving the state. 
- """ - @spec get_state(server_ref) :: t - def get_state(server_ref) do - server_ref - |> server() - |> GenServer.call(:get_state) - end - - defp do_call(tab, message) do - tab - |> server() - |> GenServer.call(message) - end - - defp get_meta_tab(server_ref) when is_atom(server_ref) or is_pid(server_ref) do - Adapter.with_meta(server_ref, fn _, %{meta_tab: meta_tab} -> - meta_tab - end) - end - - defp get_meta_tab(server_ref), do: server_ref - - ## GenServer Callbacks - - @impl true - def init(opts) do - # Trap exit signals to run cleanup process - _ = Process.flag(:trap_exit, true) - - # Initial state - state = struct(__MODULE__, parse_opts(opts)) - - # Init cleanup timer - cleanup_ref = - if state.max_size || state.allocated_memory, - do: start_timer(state.gc_cleanup_max_timeout, nil, :cleanup) - - # Timer ref - {:ok, ref} = - if state.gc_interval, - do: {new_gen(state), start_timer(state.gc_interval)}, - else: {new_gen(state), nil} - - # Update state - state = %{state | gc_cleanup_ref: cleanup_ref, gc_heartbeat_ref: ref} - - {:ok, state, {:continue, :attach_stats_handler}} - end - - defp parse_opts(opts) do - # Get adapter metadata - adapter_meta = Keyword.fetch!(opts, :adapter_meta) - - # Add the GC PID to the meta table - meta_tab = Map.fetch!(adapter_meta, :meta_tab) - :ok = Metadata.put(meta_tab, :gc_pid, self()) - - # Common validators - pos_integer = &(is_integer(&1) and &1 > 0) - pos_integer_or_nil = &((is_integer(&1) and &1 > 0) or is_nil(&1)) - - Map.merge(adapter_meta, %{ - backend_opts: Keyword.get(opts, :backend_opts, []), - gc_interval: get_option(opts, :gc_interval, "an integer > 0", pos_integer_or_nil), - max_size: get_option(opts, :max_size, "an integer > 0", pos_integer_or_nil), - allocated_memory: get_option(opts, :allocated_memory, "an integer > 0", pos_integer_or_nil), - gc_cleanup_min_timeout: - get_option(opts, :gc_cleanup_min_timeout, "an integer > 0", pos_integer, 10_000), - gc_cleanup_max_timeout: - get_option(opts, 
:gc_cleanup_max_timeout, "an integer > 0", pos_integer, 600_000), - gc_flush_delay: get_option(opts, :gc_flush_delay, "an integer > 0", pos_integer, 10_000) - }) - end - - @impl true - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: nil} = state) do - {:noreply, state} - end - - def handle_continue(:attach_stats_handler, %__MODULE__{stats_counter: stats_counter} = state) do - _ = - Telemetry.attach_many( - stats_counter, - [state.telemetry_prefix ++ [:command, :stop]], - &StatsHandler.handle_event/4, - stats_counter - ) - - {:noreply, state} - end - - @impl true - def terminate(_reason, state) do - if ref = state.stats_counter, do: Telemetry.detach(ref) - end - - @impl true - def handle_call(:delete_all, _from, %__MODULE__{} = state) do - # Get current size - size = - state - |> Map.from_struct() - |> Local.execute(:count_all, nil, []) - - # Create new generation - :ok = new_gen(state) - - # Delete all objects - :ok = - state.meta_tab - |> list() - |> Enum.each(&state.backend.delete_all_objects(&1)) - - {:reply, size, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - def handle_call({:new_generation, reset_timer?}, _from, state) do - # Create new generation - :ok = new_gen(state) - - # Maybe reset heartbeat timer - heartbeat_ref = maybe_reset_timer(reset_timer?, state) - - {:reply, :ok, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_call( - :memory_info, - _from, - %__MODULE__{backend: backend, meta_tab: meta_tab, allocated_memory: allocated} = state - ) do - {:reply, {memory_info(backend, meta_tab), allocated}, state} - end - - def handle_call({:realloc, mem_size}, _from, state) do - {:reply, :ok, %{state | allocated_memory: mem_size}} - end - - def handle_call(:get_state, _from, state) do - {:reply, state, state} - end - - @impl true - def handle_cast(:reset_timer, state) do - {:noreply, %{state | gc_heartbeat_ref: maybe_reset_timer(true, state)}} - end - - @impl true - def handle_info( - :heartbeat, - 
%__MODULE__{ - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - # Create new generation - :ok = new_gen(state) - - # Reset heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - {:noreply, %{state | gc_heartbeat_ref: heartbeat_ref}} - end - - def handle_info(:cleanup, state) do - # Check size first, if the cleanup is done, skip checking the memory, - # otherwise, check the memory too. - {_, state} = - with {false, state} <- check_size(state) do - check_memory(state) - end - - {:noreply, state} - end - - def handle_info( - :flush_older_gen, - %__MODULE__{ - meta_tab: meta_tab, - backend: backend - } = state - ) do - if deprecated = Metadata.get(meta_tab, :deprecated) do - true = backend.delete_all_objects(deprecated) - end - - {:noreply, state} - end - - defp check_size(%__MODULE__{max_size: max_size} = state) when not is_nil(max_size) do - maybe_cleanup(:size, state) - end - - defp check_size(state) do - {false, state} - end - - defp check_memory(%__MODULE__{allocated_memory: allocated} = state) when not is_nil(allocated) do - maybe_cleanup(:memory, state) - end - - defp check_memory(state) do - {false, state} - end - - defp maybe_cleanup( - info, - %__MODULE__{ - cache: cache, - name: name, - gc_cleanup_ref: cleanup_ref, - gc_cleanup_min_timeout: min_timeout, - gc_cleanup_max_timeout: max_timeout, - gc_interval: gc_interval, - gc_heartbeat_ref: heartbeat_ref - } = state - ) do - case cleanup_info(info, state) do - {size, max_size} when size >= max_size -> - # Create a new generation - :ok = new_gen(state) - - # Purge expired entries - _ = cache.delete_all(:expired, dynamic_cache: name) - - # Reset the heartbeat timer - heartbeat_ref = start_timer(gc_interval, heartbeat_ref) - - # Reset the cleanup timer - cleanup_ref = - info - |> cleanup_info(state) - |> elem(0) - |> reset_cleanup_timer(max_size, min_timeout, max_timeout, cleanup_ref) - - {true, %{state | gc_heartbeat_ref: heartbeat_ref, gc_cleanup_ref: 
cleanup_ref}} - - {size, max_size} -> - # Reset the cleanup timer - cleanup_ref = reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) - - {false, %{state | gc_cleanup_ref: cleanup_ref}} - end - end - - defp cleanup_info(:size, %__MODULE__{backend: mod, meta_tab: tab, max_size: max}) do - {size_info(mod, tab), max} - end - - defp cleanup_info(:memory, %__MODULE__{backend: mod, meta_tab: tab, allocated_memory: max}) do - {memory_info(mod, tab), max} - end - - ## Private Functions - - defp new_gen(%__MODULE__{ - meta_tab: meta_tab, - backend: backend, - backend_opts: backend_opts, - stats_counter: stats_counter, - gc_flush_delay: gc_flush_delay - }) do - # Create new generation - gen_tab = Backend.new(backend, meta_tab, backend_opts) - - # Update generation list - case list(meta_tab) do - [newer, older] -> - # Since the older generation is deleted, update evictions count - :ok = Stats.incr(stats_counter, :evictions, backend.info(older, :size)) - - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - # Process the older generation: - # - Delete previously stored deprecated generation - # - Flush the older generation - # - Deprecate it (mark it for deletion) - :ok = process_older_gen(meta_tab, backend, older, gc_flush_delay) - - [newer] -> - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab, newer]) - - [] -> - # Update generations - :ok = Metadata.put(meta_tab, :generations, [gen_tab]) - end - end - - # The older generation cannot be removed immediately because there may be - # ongoing operations using it, then it may cause race-condition errors. - # Hence, the idea is to keep it alive till a new generation is pushed, but - # flushing its data before so that we release memory space. By the time a new - # generation is pushed, then it is safe to delete it completely. 
- defp process_older_gen(meta_tab, backend, older, gc_flush_delay) do - if deprecated = Metadata.get(meta_tab, :deprecated) do - # Delete deprecated generation if it does exist - _ = Backend.delete(backend, meta_tab, deprecated) - end - - # Flush older generation to release space so it can be marked for deletion - Process.send_after(self(), :flush_older_gen, gc_flush_delay) - - # Keep alive older generation reference into the metadata - Metadata.put(meta_tab, :deprecated, older) - end - - defp start_timer(time, ref \\ nil, event \\ :heartbeat) - - defp start_timer(nil, _, _), do: nil - - defp start_timer(time, ref, event) do - _ = if ref, do: Process.cancel_timer(ref) - Process.send_after(self(), event, time) - end - - defp maybe_reset_timer(_, %__MODULE__{gc_interval: nil} = state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(false, state) do - state.gc_heartbeat_ref - end - - defp maybe_reset_timer(true, %__MODULE__{} = state) do - start_timer(state.gc_interval, state.gc_heartbeat_ref) - end - - defp reset_cleanup_timer(size, max_size, min_timeout, max_timeout, cleanup_ref) do - size - |> linear_inverse_backoff(max_size, min_timeout, max_timeout) - |> start_timer(cleanup_ref, :cleanup) - end - - defp size_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, &(backend.info(&1, :size) + &2)) - end - - defp memory_info(backend, meta_tab) do - meta_tab - |> list() - |> Enum.reduce(0, fn gen, acc -> - gen - |> backend.info(:memory) - |> Kernel.*(:erlang.system_info(:wordsize)) - |> Kernel.+(acc) - end) - end - - defp linear_inverse_backoff(size, _max_size, _min_timeout, max_timeout) when size <= 0 do - max_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, _max_timeout) when size >= max_size do - min_timeout - end - - defp linear_inverse_backoff(size, max_size, min_timeout, max_timeout) do - round((min_timeout - max_timeout) / max_size * size + max_timeout) - end -end diff --git 
a/lib/nebulex/adapters/local/metadata.ex b/lib/nebulex/adapters/local/metadata.ex deleted file mode 100644 index be232bb7..00000000 --- a/lib/nebulex/adapters/local/metadata.ex +++ /dev/null @@ -1,28 +0,0 @@ -defmodule Nebulex.Adapters.Local.Metadata do - @moduledoc false - - @type tab :: :ets.tid() | atom - - @spec init :: tab - def init do - :ets.new(__MODULE__, [:public, read_concurrency: true]) - end - - @spec get(tab, term, term) :: term - def get(tab, key, default \\ nil) do - :ets.lookup_element(tab, key, 2) - rescue - ArgumentError -> default - end - - @spec fetch!(tab, term) :: term - def fetch!(tab, key) do - :ets.lookup_element(tab, key, 2) - end - - @spec put(tab, term, term) :: :ok - def put(tab, key, value) do - true = :ets.insert(tab, {key, value}) - :ok - end -end diff --git a/lib/nebulex/adapters/multilevel.ex b/lib/nebulex/adapters/multilevel.ex deleted file mode 100644 index 8fce5453..00000000 --- a/lib/nebulex/adapters/multilevel.ex +++ /dev/null @@ -1,610 +0,0 @@ -defmodule Nebulex.Adapters.Multilevel do - @moduledoc ~S""" - Adapter module for Multi-level Cache. - - This is just a simple layer on top of local or distributed cache - implementations that enables to have a cache hierarchy by levels. - Multi-level caches generally operate by checking the fastest, - level 1 (L1) cache first; if it hits, the adapter proceeds at - high speed. If that first cache misses, the next fastest cache - (level 2, L2) is checked, and so on, before accessing external - memory (that can be handled by a `cacheable` decorator). - - For write functions, the "Write Through" policy is applied by default; - this policy ensures that the data is stored safely as it is written - throughout the hierarchy. However, it is possible to force the write - operation in a specific level (although it is not recommended) via - `level` option, where the value is a positive integer greater than 0. 
- - We can define a multi-level cache as follows: - - defmodule MyApp.Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - end - - Where the configuration for the cache and its levels must be in your - application environment, usually defined in your `config/config.exs`: - - config :my_app, MyApp.Multilevel, - model: :inclusive, - levels: [ - { - MyApp.Multilevel.L1, - gc_interval: :timer.hours(12), - backend: :shards - }, - { - MyApp.Multilevel.L2, - primary: [ - gc_interval: :timer.hours(12), - backend: :shards - ] - } - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.Multilevel, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:levels` - This option is to define the levels, a list of tuples - `{cache_level :: Nebulex.Cache.t(), opts :: Keyword.t()}`, where - the first element is the module that defines the cache for that - level, and the second one is the options that will be passed to - that level in the `start/link/1` (which depends on the adapter - this level is using). The order in which the levels are defined - is the same the multi-level cache will use. For example, the first - cache in the list will be the L1 cache (level 1) and so on; - the Nth element will be the LN cache. 
This option is mandatory, - if it is not set or empty, an exception will be raised. - - * `:model` - Specifies the cache model: `:inclusive` or `:exclusive`; - defaults to `:inclusive`. In an inclusive cache, the same data can be - present in all caches/levels. In an exclusive cache, data can be present - in only one cache/level and a key cannot be found in the rest of caches - at the same time. This option affects `get` operation only; if - `:cache_model` is `:inclusive`, when the key is found in a level N, - that entry is duplicated backwards (to all previous levels: 1..N-1). - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:level` - It may be an integer greater than 0 that specifies the cache - level where the operation will take place. By default, the evaluation - is performed throughout the whole cache hierarchy (all levels). - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the multi-level adapter is a layer/wrapper on top of other existing - adapters, each cache level may Telemetry emit events independently. 
- For example, for the cache defined before `MyApp.Multilevel`, the next - events will be emitted for the main multi-level cache: - - * `[:my_app, :multilevel, :command, :start]` - * `[:my_app, :multilevel, :command, :stop]` - * `[:my_app, :multilevel, :command, :exception]` - - For the L1 (configured with the local adapter): - - * `[:my_app, :multilevel, :l1, :command, :start]` - * `[:my_app, :multilevel, :l1, :command, :stop]` - * `[:my_app, :multilevel, :l1, :command, :exception]` - - For the L2 (configured with the partitioned adapter): - - * `[:my_app, :multilevel, :l2, :command, :start]` - * `[:my_app, :multilevel, :l2, :primary, :command, :start]` - * `[:my_app, :multilevel, :l2, :command, :stop]` - * `[:my_app, :multilevel, :l2, :primary, :command, :stop]` - * `[:my_app, :multilevel, :l2, :command, :exception]` - * `[:my_app, :multilevel, :l2, :primary, :command, :exception]` - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - Since the multi-level adapter works as a wrapper for the configured cache - levels, the support for stats depends on the underlying levels. Also, the - measurements are consolidated per level, they are not aggregated. For example, - if we enable the stats for the multi-level cache defined previously and run: - - MyApp.Multilevel.stats() - - The returned stats will look like: - - %Nebulex.Stats{ - measurements: %{ - l1: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0}, - l2: %{evictions: 0, expirations: 0, hits: 0, misses: 0, writes: 0} - }, - metadata: %{ - l1: %{ - cache: NMyApp.Multilevel.L1, - started_at: ~U[2021-01-10 13:06:04.075084Z] - }, - l2: %{ - cache: MyApp.Multilevel.L2.Primary, - started_at: ~U[2021-01-10 13:06:04.089888Z] - }, - cache: MyApp.Multilevel, - started_at: ~U[2021-01-10 13:06:04.066750Z] - } - } - - **IMPORTANT:** Those cache levels with stats disabled won't be included - into the returned stats (they are skipped). 
If a cache level is using - an adapter that does not support stats, you may get unexpected errors. - Therefore, and as overall recommendation, check out the documentation - for adapters used by the underlying cache levels and ensure they - implement the `Nebulex.Adapter.Stats` behaviour. - - ### Stats with Telemetry - - In case you are using Telemetry metrics, you can define the metrics per - level, for example: - - last_value("nebulex.cache.stats.l1.hits", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :hits]), - tags: [:cache] - ) - last_value("nebulex.cache.stats.l1.misses", - event_name: "nebulex.cache.stats", - measurement: &get_in(&1, [:l1, :misses]), - tags: [:cache] - ) - - > See the section **"Instrumenting Multi-level caches"** in the - [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information. - - ## Extended API - - This adapter provides one additional convenience function for retrieving - the cache model for the given cache `name`: - - MyCache.model() - MyCache.model(:cache_name) - - ## Caveats of multi-level adapter - - Because this adapter reuses other existing/configured adapters, it inherits - all their limitations too. Therefore, it is highly recommended to check the - documentation of the adapters to use. - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - - # Multi-level Cache Models - @models [:inclusive, :exclusive] - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function to get the cache model. 
- """ - def model(name \\ __MODULE__) do - with_meta(name, fn _adapter, %{model: model} -> - model - end) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Get cache levels - levels = - get_option( - opts, - :levels, - "a list with at least one level definition", - &(Keyword.keyword?(&1) && length(&1) > 0) - ) - - # Get multilevel-cache model - model = get_option(opts, :model, ":inclusive or :exclusive", &(&1 in @models), :inclusive) - - # Build multi-level specs - {children, meta_list, _} = children(levels, telemetry_prefix, telemetry, stats) - - # Build adapter spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :one_for_one, - children: children - ) - - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - levels: meta_list, - model: model, - stats: stats, - started_at: DateTime.utc_now() - } - - {:ok, child_spec, adapter_meta} - end - - # sobelow_skip ["DOS.BinToAtom"] - defp children(levels, telemetry_prefix, telemetry, stats) do - levels - |> Enum.reverse() - |> Enum.reduce({[], [], length(levels)}, fn {l_cache, l_opts}, {child_acc, meta_acc, n} -> - l_opts = - Keyword.merge( - [ - telemetry_prefix: telemetry_prefix ++ [:"l#{n}"], - telemetry: telemetry, - stats: stats - ], - l_opts - ) - - meta = %{cache: l_cache, name: l_opts[:name]} - - {[{l_cache, l_opts} | child_acc], [meta | meta_acc], n - 1} - end) - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - fun = fn level, {default, prev} -> - value = with_dynamic_cache(level, :get, [key, opts]) - - if is_nil(value) do - {:cont, {default, [level | prev]}} - else - {:halt, {value, [level | 
prev]}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({nil, []}, fun) - |> maybe_replicate(key, adapter_meta.model) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - fun = fn level, {keys_acc, map_acc} -> - map = with_dynamic_cache(level, :get_all, [keys_acc, opts]) - map_acc = Map.merge(map_acc, map) - - case keys_acc -- Map.keys(map) do - [] -> {:halt, {[], map_acc}} - keys_acc -> {:cont, {keys_acc, map_acc}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({keys, %{}}, fun) - |> elem(1) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - :ok = eval(adapter_meta, :put, [key, value, opts], opts) - true - - :put_new -> - eval(adapter_meta, :put_new, [key, value, opts], opts) - - :replace -> - eval(adapter_meta, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - - reducer = fn level, {_, level_acc} -> - case with_dynamic_cache(level, action, [entries, opts]) do - :ok -> - {:cont, {true, [level | level_acc]}} - - true -> - {:cont, {true, [level | level_acc]}} - - false -> - _ = delete_from_levels(level_acc, entries) - {:halt, {on_write == :put, level_acc}} - end - end - - opts - |> levels(adapter_meta.levels) - |> Enum.reduce_while({true, []}, reducer) - |> elem(0) - end - - @impl true - defspan delete(adapter_meta, key, opts) do - eval(adapter_meta, :delete, [key, opts], Keyword.put(opts, :reverse, true)) - end - - @impl true - defspan take(adapter_meta, key, opts) do - opts - |> levels(adapter_meta.levels) - |> do_take(nil, key, opts) - end - - defp do_take([], result, _key, _opts), do: result - - defp do_take([l_meta | rest], nil, key, opts) do - result = with_dynamic_cache(l_meta, :take, [key, opts]) - do_take(rest, result, key, opts) - end - - defp do_take(levels, result, 
key, _opts) do - _ = eval(levels, :delete, [key, []], reverse: true) - result - end - - @impl true - defspan has_key?(adapter_meta, key) do - eval_while(adapter_meta, :has_key?, [key], false) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - eval(adapter_meta, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key) do - eval_while(adapter_meta, :ttl, [key], nil) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - Enum.reduce(adapter_meta.levels, false, fn l_meta, acc -> - with_dynamic_cache(l_meta, :expire, [key, ttl]) or acc - end) - end - - @impl true - defspan touch(adapter_meta, key) do - Enum.reduce(adapter_meta.levels, false, fn l_meta, acc -> - with_dynamic_cache(l_meta, :touch, [key]) or acc - end) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - {levels, reducer, acc_in} = - case operation do - :all -> {adapter_meta.levels, &(&1 ++ &2), []} - :delete_all -> {Enum.reverse(adapter_meta.levels), &(&1 + &2), 0} - _ -> {adapter_meta.levels, &(&1 + &2), 0} - end - - Enum.reduce(levels, acc_in, fn level, acc -> - level - |> with_dynamic_cache(operation, [query, opts]) - |> reducer.(acc) - end) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - adapter_meta.levels - end, - fn - [] -> - {:halt, []} - - [level | levels] -> - elements = - level - |> with_dynamic_cache(:stream, [query, opts]) - |> Enum.to_list() - - {elements, levels} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - # Perhaps one of the levels is a distributed adapter, - # then ensure the lock on the right cluster nodes. 
- nodes = - adapter_meta.levels - |> Enum.reduce([node()], fn %{name: name, cache: cache}, acc -> - if cache.__adapter__ in [Nebulex.Adapters.Partitioned, Nebulex.Adapters.Replicated] do - Cluster.get_nodes(name || cache) ++ acc - else - acc - end - end) - |> Enum.uniq() - - super(adapter_meta, Keyword.put(opts, :nodes, nodes), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - if adapter_meta.stats do - init_acc = %Nebulex.Stats{ - metadata: %{ - cache: adapter_meta.name || adapter_meta.cache, - started_at: adapter_meta.started_at - } - } - - adapter_meta.levels - |> Enum.with_index(1) - |> Enum.reduce(init_acc, &update_stats/2) - end - end - - # We can safely disable this warning since the atom created dynamically is - # always re-used; the number of levels is limited and known before hand. - # sobelow_skip ["DOS.BinToAtom"] - defp update_stats({meta, idx}, stats_acc) do - if stats = with_dynamic_cache(meta, :stats, []) do - level_idx = :"l#{idx}" - measurements = Map.put(stats_acc.measurements, level_idx, stats.measurements) - metadata = Map.put(stats_acc.metadata, level_idx, stats.metadata) - %{stats_acc | measurements: measurements, metadata: metadata} - else - stats_acc - end - end - - ## Helpers - - defp with_dynamic_cache(%{cache: cache, name: nil}, action, args) do - apply(cache, action, args) - end - - defp with_dynamic_cache(%{cache: cache, name: name}, action, args) do - cache.with_dynamic_cache(name, fn -> - apply(cache, action, args) - end) - end - - defp eval(%{levels: levels}, fun, args, opts) do - eval(levels, fun, args, opts) - end - - defp eval(levels, fun, args, opts) when is_list(levels) do - opts - |> levels(levels) - |> eval(fun, args) - end - - defp eval([level_meta | next], fun, args) do - Enum.reduce(next, with_dynamic_cache(level_meta, fun, args), fn l_meta, acc -> - ^acc = with_dynamic_cache(l_meta, fun, args) - 
end) - end - - defp levels(opts, levels) do - levels = - case Keyword.get(opts, :level) do - nil -> levels - level -> [Enum.at(levels, level - 1)] - end - - if Keyword.get(opts, :reverse) do - Enum.reverse(levels) - else - levels - end - end - - defp eval_while(%{levels: levels}, fun, args, init) do - Enum.reduce_while(levels, init, fn level_meta, acc -> - if return = with_dynamic_cache(level_meta, fun, args), - do: {:halt, return}, - else: {:cont, acc} - end) - end - - defp delete_from_levels(levels, entries) do - for level_meta <- levels, {key, _} <- entries do - with_dynamic_cache(level_meta, :delete, [key, []]) - end - end - - defp maybe_replicate({nil, _}, _, _), do: nil - - defp maybe_replicate({value, [level_meta | [_ | _] = levels]}, key, :inclusive) do - ttl = with_dynamic_cache(level_meta, :ttl, [key]) || :infinity - - :ok = - Enum.each(levels, fn l_meta -> - _ = with_dynamic_cache(l_meta, :put, [key, value, [ttl: ttl]]) - end) - - value - end - - defp maybe_replicate({value, _levels}, _key, _model) do - value - end -end diff --git a/lib/nebulex/adapters/nil.ex b/lib/nebulex/adapters/nil.ex index 1203f47a..cb5d3cd4 100644 --- a/lib/nebulex/adapters/nil.ex +++ b/lib/nebulex/adapters/nil.ex @@ -62,68 +62,80 @@ defmodule Nebulex.Adapters.Nil do # Provide Cache Implementation @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry + @behaviour Nebulex.Adapter.KV @behaviour Nebulex.Adapter.Queryable @behaviour Nebulex.Adapter.Persistence - @behaviour Nebulex.Adapter.Stats # Inherit default transaction implementation use Nebulex.Adapter.Transaction + # Inherit default info implementation + use Nebulex.Adapters.Common.Info + + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.Adapters.Common.Info.Stats + ## Nebulex.Adapter @impl true defmacro __before_compile__(_env), do: :ok @impl true - def init(_opts) do - child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: {Agent, 1}) - {:ok, child_spec, %{}} - end + def init(opts) do + 
child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) - ## Nebulex.Adapter.Entry + {:ok, child_spec, %{stats_counter: Stats.init(opts)}} + end - @impl true - def get(_, _, _), do: nil + ## Nebulex.Adapter.KV @impl true - def get_all(_, _, _), do: %{} + def fetch(adapter_meta, key, _) do + wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.cache + end @impl true - def put(_, _, _, _, _, _), do: true + def put(_, _, _, _, _, _), do: {:ok, true} @impl true - def put_all(_, _, _, _, _), do: true + def put_all(_, _, _, _, _), do: {:ok, true} @impl true def delete(_, _, _), do: :ok @impl true - def take(_, _, _), do: nil + def take(adapter_meta, key, _) do + wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.cache + end @impl true - def has_key?(_, _), do: false + def has_key?(_, _, _), do: {:ok, false} @impl true - def ttl(_, _), do: nil + def ttl(adapter_meta, key, _opts) do + wrap_error Nebulex.KeyError, key: key, cache: adapter_meta.cache + end @impl true - def expire(_, _, _), do: true + def expire(_, _, _, _), do: {:ok, false} @impl true - def touch(_, _), do: true + def touch(_, _, _), do: {:ok, false} @impl true - def update_counter(_, _, amount, _, default, _), do: default + amount + def update_counter(_, _, amount, _, default, _) do + {:ok, default + amount} + end ## Nebulex.Adapter.Queryable @impl true - def execute(_, :all, _, _), do: [] - def execute(_, _, _, _), do: 0 + def execute(_, :get_all, _, _), do: {:ok, []} + def execute(_, _, _, _), do: {:ok, 0} @impl true - def stream(_, _, _), do: Stream.each([], & &1) + def stream(_, _, _), do: {:ok, Stream.each([], & &1)} ## Nebulex.Adapter.Persistence @@ -132,9 +144,4 @@ defmodule Nebulex.Adapters.Nil do @impl true def load(_, _, _), do: :ok - - ## Nebulex.Adapter.Stats - - @impl true - def stats(_), do: %Nebulex.Stats{} end diff --git a/lib/nebulex/adapters/partitioned.ex b/lib/nebulex/adapters/partitioned.ex deleted file mode 100644 index 173acde7..00000000 --- 
a/lib/nebulex/adapters/partitioned.ex +++ /dev/null @@ -1,857 +0,0 @@ -defmodule Nebulex.Adapters.Partitioned do - @moduledoc ~S""" - Built-in adapter for partitioned cache topology. - - ## Overall features - - * Partitioned cache topology (Sharding Distribution Model). - * Configurable primary storage adapter. - * Configurable Keyslot to distributed the keys across the cluster members. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Partitioned Cache Topology - - There are several key points to consider about a partitioned cache: - - * _**Partitioned**_: The data in a distributed cache is spread out over - all the servers in such a way that no two servers are responsible for - the same piece of cached data. This means that the size of the cache - and the processing power associated with the management of the cache - can grow linearly with the size of the cluster. Also, it means that - operations against data in the cache can be accomplished with a - "single hop," in other words, involving at most one other server. - - * _**Load-Balanced**_: Since the data is spread out evenly over the - servers, the responsibility for managing the data is automatically - load-balanced across the cluster. - - * _**Ownership**_: Exactly one node in the cluster is responsible for each - piece of data in the cache. - - * _**Point-To-Point**_: The communication for the partitioned cache is all - point-to-point, enabling linear scalability. - - * _**Location Transparency**_: Although the data is spread out across - cluster nodes, the exact same API is used to access the data, and the - same behavior is provided by each of the API methods. This is called - location transparency, which means that the developer does not have to - code based on the topology of the cache, since the API and its behavior - will be the same with a local cache, a replicated cache, or a distributed - cache. 
- - * _**Failover**_: Failover of a distributed cache involves promoting backup - data to be primary storage. When a cluster node fails, all remaining - cluster nodes determine what data each holds in backup that the failed - cluster node had primary responsible for when it died. Those data becomes - the responsibility of whatever cluster node was the backup for the data. - However, this adapter does not provide fault-tolerance implementation, - each piece of data is kept in a single node/machine (via sharding), then, - if a node fails, the data kept by this node won't be available for the - rest of the cluster members. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy** - and [Coherence Partitioned Cache Service][oracle-pcs]. - - [oracle-pcs]: https://docs.oracle.com/cd/E13924_01/coh.340/e13819/partitionedcacheservice.htm - - ## Additional implementation notes - - `:pg2` or `:pg` (>= OTP 23) is used under-the-hood by the adapter to manage - the cluster nodes. When the partitioned cache is started in a node, it creates - a group and joins it (the cache supervisor PID is joined to the group). Then, - when a function is invoked, the adapter picks a node from the group members, - and then the function is executed on that specific node. In the same way, - when a partitioned cache supervisor dies (the cache is stopped or killed for - some reason), the PID of that process is automatically removed from the PG - group; this is why it's recommended to use consistent hashing for distributing - the keys across the cluster nodes. - - > **NOTE:** `pg2` will be replaced by `pg` in future, since the `pg2` module - is deprecated as of OTP 23 and scheduled for removal in OTP 24. - - This adapter depends on a local cache adapter (primary storage), it adds - a thin layer on top of it in order to distribute requests across a group - of nodes, where is supposed the local cache is running already. 
However, - you don't need to define any additional cache module for the primary - storage, instead, the adapter initializes it automatically (it adds the - primary storage as part of the supervision tree) based on the given - options within the `primary_storage_adapter:` argument. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. For example: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - end - - Also, you can provide a custom keyslot function: - - defmodule MyApp.PartitionedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.Adapters.Local - - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end - - Where the configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.PartitionedCache, - keyslot: MyApp.PartitionedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. 
- You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.PartitionedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - * `:keyslot` - Defines the module implementing `Nebulex.Adapter.Keyslot` - behaviour. - - * `:task_supervisor_opts` - Start-time options passed to - `Task.Supervisor.start_link/1` when the adapter is initialized. - - * `:join_timeout` - Interval time in milliseconds for joining the - running partitioned cache to the cluster. This is to ensure it is - always joined. Defaults to `:timer.seconds(180)`. - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the partitioned adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. 
- Therefore, there will be events emitted by the partitioned adapter as well - as the primary storage adapter. For example, for the cache defined before - `MyApp.PartitionedCache`, these would be the emitted events: - - * `[:my_app, :partitioned_cache, :command, :start]` - * `[:my_app, :partitioned_cache, :primary, :command, :start]` - * `[:my_app, :partitioned_cache, :command, :stop]` - * `[:my_app, :partitioned_cache, :primary, :command, :stop]` - * `[:my_app, :partitioned_cache, :command, :exception]` - * `[:my_app, :partitioned_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the partitioned cache - is `[:my_app, :partitioned_cache]`, and the prefix for its primary storage - `[:my_app, :partitioned_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:bootstrap, :started]` - Dispatched by the adapter - when the bootstrap process is started. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :stopped]` - Dispatched by the adapter - when the bootstrap process is stopped. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :exit]` - Dispatched by the adapter - when the bootstrap has received an exit signal. - - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node], - reason: term - } - ``` - - * `telemetry_prefix ++ [:bootstrap, :joined]` - Dispatched by the adapter - when the bootstrap has joined the cache to the cluster. 
- - * Measurements: `%{system_time: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - cluster_nodes: [node] - } - ``` - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. - - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache `name`: - - MyCache.nodes() - - Get a cluster node based on the given `key`: - - MyCache.get_node("mykey") - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Caveats of partitioned adapter - - For `c:Nebulex.Cache.get_and_update/3` and `c:Nebulex.Cache.update/4`, - they both have a parameter that is the anonymous function, and it is compiled - into the module where it is created, which means it necessarily doesn't exists - on remote nodes. To ensure they work as expected, you must provide functions - from modules existing in all nodes of the group. 
- """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - # Inherit default keyslot implementation - use Nebulex.Adapter.Keyslot - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.RPC - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. - """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. - """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function to get the node of the given `key`. - """ - def get_node(key) do - with_meta(get_dynamic_cache(), fn _adapter, %{name: name, keyslot: keyslot} -> - Cluster.get_node(name, key, keyslot) - end) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. 
- """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Keyslot module for selecting nodes - keyslot = - opts - |> get_option(:keyslot, "an atom", &is_atom/1, __MODULE__) - |> assert_behaviour(Nebulex.Adapter.Keyslot, "keyslot") - - # Maybe task supervisor for distributed tasks - {task_sup_name, children} = task_sup_child_spec(name, opts) - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - task_sup: task_sup_name, - keyslot: keyslot, - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - {__MODULE__.Bootstrap, {Map.put(adapter_meta, :cache, cache), opts}} - | children - ] - ) - - {:ok, child_spec, adapter_meta} - end - - if Code.ensure_loaded?(:erpc) do - defp task_sup_child_spec(_name, _opts) do - {nil, []} - end - else - defp task_sup_child_spec(name, opts) do - # task supervisor to execute parallel and/or remote commands - task_sup_name = normalize_module_name([name, TaskSupervisor]) - task_sup_opts = Keyword.get(opts, :task_supervisor_opts, []) - - children = [ - {Task.Supervisor, [name: task_sup_name] ++ 
task_sup_opts} - ] - - {task_sup_name, children} - end - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - call(adapter_meta, key, :get, [key, opts], opts) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - map_reduce( - keys, - adapter_meta, - :get_all, - [opts], - Keyword.get(opts, :timeout), - { - %{}, - fn - {:ok, res}, _, acc when is_map(res) -> - Map.merge(acc, res) - - _, _, acc -> - acc - end - } - ) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case on_write do - :put -> - :ok = call(adapter_meta, key, :put, [key, value, opts], opts) - true - - :put_new -> - call(adapter_meta, key, :put_new, [key, value, opts], opts) - - :replace -> - call(adapter_meta, key, :replace, [key, value, opts], opts) - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - case on_write do - :put -> - do_put_all(:put_all, adapter_meta, entries, opts) - - :put_new -> - do_put_all(:put_new_all, adapter_meta, entries, opts) - end - end - - def do_put_all(action, adapter_meta, entries, opts) do - reducer = { - {true, []}, - fn - {:ok, :ok}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} -> - {bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])} - - {:ok, true}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} -> - {bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])} - - {:ok, false}, _, {_, acc} -> - {false, acc} - - {:error, _}, _, {_, acc} -> - {false, acc} - end - } - - entries - |> map_reduce( - adapter_meta, - action, - [opts], - Keyword.get(opts, :timeout), - reducer - ) - |> case do - {true, _} -> - true - - {false, keys} -> - :ok = Enum.each(keys, &delete(adapter_meta, &1, [])) - action == :put_all - end - end - - @impl true - defspan delete(adapter_meta, key, opts) do - call(adapter_meta, key, :delete, [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - call(adapter_meta, key, :take, [key, opts], opts) - end - - @impl true 
- defspan has_key?(adapter_meta, key) do - call(adapter_meta, key, :has_key?, [key]) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - call(adapter_meta, key, :incr, [key, amount, opts], opts) - end - - @impl true - defspan ttl(adapter_meta, key) do - call(adapter_meta, key, :ttl, [key]) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - call(adapter_meta, key, :expire, [key, ttl]) - end - - @impl true - defspan touch(adapter_meta, key) do - call(adapter_meta, key, :touch, [key]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - reducer = - case operation do - :all -> &List.flatten/1 - _ -> &Enum.sum/1 - end - - adapter_meta.task_sup - |> RPC.multi_call( - Cluster.get_nodes(adapter_meta.name), - __MODULE__, - :with_dynamic_cache, - [adapter_meta, operation, [query, opts]], - opts - ) - |> handle_rpc_multi_call(operation, reducer) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - Cluster.get_nodes(adapter_meta.name) - end, - fn - [] -> - {:halt, []} - - [node | nodes] -> - elements = - rpc_call( - adapter_meta.task_sup, - node, - __MODULE__, - :eval_stream, - [adapter_meta, query, opts], - opts - ) - - {elements, nodes} - end, - & &1 - ) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, Keyword.put(opts, :nodes, Cluster.get_nodes(adapter_meta.name)), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## 
Helpers - - @doc """ - Helper function to use dynamic cache for internal primary cache storage - when needed. - """ - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - @doc """ - Helper to perform `stream/3` locally. - """ - def eval_stream(meta, query, opts) do - meta - |> with_dynamic_cache(:stream, [query, opts]) - |> Enum.to_list() - end - - ## Private Functions - - defp get_node(%{name: name, keyslot: keyslot}, key) do - Cluster.get_node(name, key, keyslot) - end - - defp call(adapter_meta, key, action, args, opts \\ []) do - adapter_meta - |> get_node(key) - |> rpc_call(adapter_meta, action, args, opts) - end - - defp rpc_call(node, %{task_sup: task_sup} = meta, fun, args, opts) do - rpc_call(task_sup, node, __MODULE__, :with_dynamic_cache, [meta, fun, args], opts) - end - - if Code.ensure_loaded?(:erpc) do - defp rpc_call(supervisor, node, mod, fun, args, opts) do - RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000) - end - else - defp rpc_call(supervisor, node, mod, fun, args, opts) do - case RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000) do - {:badrpc, remote_ex} -> - raise remote_ex - - response -> - response - end - end - end - - defp group_keys_by_node(enum, adapter_meta) do - Enum.reduce(enum, %{}, fn - {key, _} = entry, acc -> - node = get_node(adapter_meta, key) - Map.put(acc, node, [entry | Map.get(acc, node, [])]) - - key, acc -> - node = get_node(adapter_meta, key) - Map.put(acc, node, [key | Map.get(acc, node, [])]) - end) - end - - defp map_reduce( - enum, - %{task_sup: task_sup} = meta, - action, - args, - timeout, - reducer - ) do - groups = - enum - |> group_keys_by_node(meta) - |> Enum.map(fn {node, group} -> - {node, {__MODULE__, 
:with_dynamic_cache, [meta, action, [group | args]]}} - end) - - RPC.multi_call(task_sup, groups, timeout: timeout, reducer: reducer) - end - - defp handle_rpc_multi_call({res, []}, _action, fun) do - fun.(res) - end - - defp handle_rpc_multi_call({responses, errors}, action, _) do - raise Nebulex.RPCMultiCallError, action: action, responses: responses, errors: errors - end -end - -defmodule Nebulex.Adapters.Partitioned.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.Telemetry - - # Default join timeout - @join_timeout :timer.seconds(180) - - # State - defstruct [:adapter_meta, :join_timeout] - - ## API - - @doc false - def start_link({%{name: name}, _} = state) do - GenServer.start_link( - __MODULE__, - state, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init({adapter_meta, opts}) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Bootstrap started - :ok = dispatch_telemetry_event(:started, adapter_meta) - - # Ensure joining the cluster when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - # Build initial state - state = build_state(adapter_meta, opts) - - # Start bootstrap process - {:ok, state, state.join_timeout} - end - - @impl true - def handle_info(message, state) - - def handle_info(:timeout, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Ensure it is always joined to the cluster - :ok = Cluster.join(adapter_meta.name) - - # Bootstrap joined the cache to the cluster - :ok = dispatch_telemetry_event(:joined, adapter_meta) - - {:noreply, state, state.join_timeout} - end - - def handle_info({:EXIT, _from, reason}, %__MODULE__{adapter_meta: adapter_meta} = state) do - # Bootstrap received exit signal - :ok = dispatch_telemetry_event(:exit, 
adapter_meta, %{reason: reason}) - - {:stop, reason, state} - end - - @impl true - def terminate(reason, %__MODULE__{adapter_meta: adapter_meta}) do - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(adapter_meta.name) - - # Bootstrap stopped or terminated - :ok = dispatch_telemetry_event(:stopped, adapter_meta, %{reason: reason}) - end - - ## Private Functions - - defp build_state(adapter_meta, opts) do - # Join timeout to ensure it is always joined to the cluster - join_timeout = - get_option( - opts, - :join_timeout, - "an integer > 0", - &(is_integer(&1) and &1 > 0), - @join_timeout - ) - - %__MODULE__{adapter_meta: adapter_meta, join_timeout: join_timeout} - end - - defp dispatch_telemetry_event(event, adapter_meta, meta \\ %{}) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap, event], - %{system_time: System.system_time()}, - Map.merge(meta, %{ - adapter_meta: adapter_meta, - cluster_nodes: Cluster.get_nodes(adapter_meta.name) - }) - ) - end -end diff --git a/lib/nebulex/adapters/replicated.ex b/lib/nebulex/adapters/replicated.ex deleted file mode 100644 index 7502be76..00000000 --- a/lib/nebulex/adapters/replicated.ex +++ /dev/null @@ -1,833 +0,0 @@ -defmodule Nebulex.Adapters.Replicated do - @moduledoc ~S""" - Built-in adapter for replicated cache topology. - - ## Overall features - - * Replicated cache topology. - * Configurable primary storage adapter. - * Cache-level locking when deleting all entries or adding new nodes. - * Key-level (or entry-level) locking for key-based write-like operations. - * Support for transactions via Erlang global name registration facility. - * Stats support rely on the primary storage adapter. - - ## Replicated Cache Topology - - A replicated cache is a clustered, fault tolerant cache where data is fully - replicated to every member in the cluster. 
This cache offers the fastest read - performance with linear performance scalability for reads but poor scalability - for writes (as writes must be processed by every member in the cluster). - Because data is replicated to all servers, adding servers does not increase - aggregate cache capacity. - - There are several challenges to building a reliably replicated cache. The - first is how to get it to scale and perform well. Updates to the cache have - to be sent to all cluster nodes, and all cluster nodes have to end up with - the same data, even if multiple updates to the same piece of data occur at - the same time. Also, if a cluster node requests a lock, ideally it should - not have to get all cluster nodes to agree on the lock or at least do it in - a very efficient way (`:global` is used here), otherwise it will scale - extremely poorly; yet in the case of a cluster node failure, all of the data - and lock information must be kept safely. - - The best part of a replicated cache is its access speed. Since the data is - replicated to each cluster node, it is available for use without any waiting. - This is referred to as "zero latency access," and is perfect for situations - in which an application requires the highest possible speed in its data - access. - - However, there are some limitations: - - * _**Cost Per Update**_ - Updating a replicated cache requires pushing - the new version of the data to all other cluster members, which will - limit scalability if there is a high frequency of updates per member. - - * _**Cost Per Entry**_ - The data is replicated to every cluster member, - so Memory Heap space is used on each member, which will impact - performance for large caches. - - > Based on **"Distributed Caching Essential Lessons"** by **Cameron Purdy**. - - ## Usage - - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. 
For example: - - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated - end - - Optionally, you can configure the desired primary storage adapter with the - option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`. - - defmodule MyApp.ReplicatedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.Adapters.Local - end - - The configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.ReplicatedCache, - primary: [ - gc_interval: 3_600_000, - backend: :shards - ] - - If your application was generated with a supervisor (by passing `--sup` - to `mix new`) you will have a `lib/my_app/application.ex` file containing - the application start callback that defines and starts your supervisor. - You just need to edit the `start/2` function to start the cache as a - supervisor on your application's supervisor: - - def start(_type, _args) do - children = [ - {MyApp.ReplicatedCache, []}, - ... - ] - - See `Nebulex.Cache` for more information. - - ## Options - - This adapter supports the following options and all of them can be given via - the cache configuration: - - * `:primary` - The options that will be passed to the adapter associated - with the local primary storage. These options will depend on the local - adapter to use. - - * `:task_supervisor_opts` - Start-time options passed to - `Task.Supervisor.start_link/1` when the adapter is initialized. - - ## Shared options - - Almost all of the cache functions outlined in `Nebulex.Cache` module - accept the following options: - - * `:timeout` - The time-out value in milliseconds for the command that - will be executed. If the timeout is exceeded, then the current process - will exit. 
For executing a command on remote nodes, this adapter uses - `Task.await/2` internally for receiving the result, so this option tells - how much time the adapter should wait for it. If the timeout is exceeded, - the task is shut down but the current process doesn't exit, only the - result associated with that task is skipped in the reduce phase. - - ## Telemetry events - - This adapter emits all recommended Telemetry events, and documented - in `Nebulex.Cache` module (see **"Adapter-specific events"** section). - - Since the replicated adapter depends on the configured primary storage - adapter (local cache adapter), this one may also emit Telemetry events. - Therefore, there will be events emitted by the replicated adapter as well - as the primary storage adapter. For example, for the cache defined before - `MyApp.ReplicatedCache`, these would be the emitted events: - - * `[:my_app, :replicated_cache, :command, :start]` - * `[:my_app, :replicated_cache, :primary, :command, :start]` - * `[:my_app, :replicated_cache, :command, :stop]` - * `[:my_app, :replicated_cache, :primary, :command, :stop]` - * `[:my_app, :replicated_cache, :command, :exception]` - * `[:my_app, :replicated_cache, :primary, :command, :exception]` - - As you may notice, the telemetry prefix by default for the replicated cache - is `[:my_app, :replicated_cache]`, and the prefix for its primary storage - `[:my_app, :replicated_cache, :primary]`. - - See also the [Telemetry guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information and examples. - - ## Stats - - This adapter depends on the primary storage adapter for the stats support. - Therefore, it is important to ensure the underlying primary storage adapter - does support stats, otherwise, you may get unexpected errors. - - ## Extended API - - This adapter provides some additional convenience functions to the - `Nebulex.Cache` API. 
- - Retrieving the primary storage or local cache module: - - MyCache.__primary__() - - Retrieving the cluster nodes associated with the given cache name: - - MyCache.nodes() - - Joining the cache to the cluster: - - MyCache.join_cluster() - - Leaving the cluster (removes the cache from the cluster): - - MyCache.leave_cluster() - - ## Adapter-specific telemetry events - - This adapter exposes following Telemetry events: - - * `telemetry_prefix ++ [:replication]` - Dispatched by the adapter - when a replication error occurs due to a write-like operation - under-the-hood. - - * Measurements: `%{rpc_errors: non_neg_integer}` - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - rpc_errors: [{node, error :: term}] - } - ``` - - * `telemetry_prefix ++ [:bootstrap]` - Dispatched by the adapter at start - time when there are errors while syncing up with the cluster nodes. - - * Measurements: - - ``` - %{ - failed_nodes: non_neg_integer, - remote_errors: non_neg_integer - } - ``` - - * Metadata: - - ``` - %{ - adapter_meta: %{optional(atom) => term}, - failed_nodes: [node], - remote_errors: [term] - } - ``` - - ## Caveats of replicated adapter - - As it is explained in the beginning, a replicated topology not only brings - with advantages (mostly for reads) but also with some limitations and - challenges. - - This adapter uses global locks (via `:global`) for all operation that modify - or alter the cache somehow to ensure as much consistency as possible across - all members of the cluster. These locks may be per key or for the entire cache - depending on the operation taking place. For that reason, it is very important - to be aware about those operation that can potentially lead to performance and - scalability issues, so that you can do a better usage of the replicated - adapter. 
The following is with the operations and aspects you should pay - attention to: - - * Starting and joining a new replicated node to the cluster is the most - expensive action, because all write-like operations across all members of - the cluster are blocked until the new node completes the synchronization - process, which involves copying cached data from any of the existing - cluster nodes into the new node, and this could be very expensive - depending on the number of caches entries. For that reason, adding new - nodes is considered an expensive operation that should happen only from - time to time. - - * Deleting all entries. When `c:Nebulex.Cache.delete_all/2` action is - executed, like in the previous case, all write-like operations in all - members of the cluster are blocked until the deletion action is completed - (this implies deleting all cached data from all cluster nodes). Therefore, - deleting all entries from cache is also considered an expensive operation - that should happen only from time to time. - - * Write-like operations based on a key only block operations related to - that key across all members of the cluster. This is not as critical as - the previous two cases but it is something to keep in mind anyway because - if there is a highly demanded key in terms of writes, that could be also - a potential bottleneck. - - Summing up, the replicated cache topology along with this adapter should - be used mainly when the the reads clearly dominate over the writes (e.g.: - Reads 80% and Writes 20% or less). Besides, operations like deleting all - entries from cache or adding new nodes must be executed only once in a while - to avoid performance issues, since they are very expensive. 
- """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - @behaviour Nebulex.Adapter.Stats - - # Inherit default transaction implementation - use Nebulex.Adapter.Transaction - - # Inherit default persistence implementation - use Nebulex.Adapter.Persistence - - import Bitwise, only: [<<<: 2] - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Cache.Cluster - alias Nebulex.{RPC, Telemetry} - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(env) do - otp_app = Module.get_attribute(env.module, :otp_app) - opts = Module.get_attribute(env.module, :opts) - primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local) - - quote do - defmodule Primary do - @moduledoc """ - This is the cache for the primary storage. - """ - use Nebulex.Cache, - otp_app: unquote(otp_app), - adapter: unquote(primary) - end - - @doc """ - A convenience function for getting the primary storage cache. - """ - def __primary__, do: Primary - - @doc """ - A convenience function for getting the cluster nodes. - """ - def nodes do - Cluster.get_nodes(get_dynamic_cache()) - end - - @doc """ - A convenience function for joining the cache to the cluster. - """ - def join_cluster do - Cluster.join(get_dynamic_cache()) - end - - @doc """ - A convenience function for removing the cache from the cluster. 
- """ - def leave_cluster do - Cluster.leave(get_dynamic_cache()) - end - end - end - - @impl true - def init(opts) do - # Required options - telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) - telemetry = Keyword.fetch!(opts, :telemetry) - cache = Keyword.fetch!(opts, :cache) - name = opts[:name] || cache - - # Maybe use stats - stats = get_boolean_option(opts, :stats) - - # Primary cache options - primary_opts = - Keyword.merge( - [telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats], - Keyword.get(opts, :primary, []) - ) - - # Maybe put a name to primary storage - primary_opts = - if opts[:name], - do: [name: normalize_module_name([name, Primary])] ++ primary_opts, - else: primary_opts - - # Maybe task supervisor for distributed tasks - {task_sup_name, children} = sup_child_spec(name, opts) - - # Prepare metadata - adapter_meta = %{ - telemetry_prefix: telemetry_prefix, - telemetry: telemetry, - name: name, - primary_name: primary_opts[:name], - task_sup: task_sup_name, - stats: stats - } - - # Prepare child_spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :rest_for_one, - children: [ - {cache.__primary__, primary_opts}, - {__MODULE__.Bootstrap, Map.put(adapter_meta, :cache, cache)} - | children - ] - ) - - {:ok, child_spec, adapter_meta} - end - - if Code.ensure_loaded?(:erpc) do - defp sup_child_spec(_name, _opts) do - {nil, []} - end - else - defp sup_child_spec(name, opts) do - # Task supervisor to execute parallel and/or remote commands - task_sup_name = normalize_module_name([name, TaskSupervisor]) - task_sup_opts = Keyword.get(opts, :task_supervisor_opts, []) - - children = [ - {Task.Supervisor, [name: task_sup_name] ++ task_sup_opts} - ] - - {task_sup_name, children} - end - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - with_dynamic_cache(adapter_meta, :get, [key, opts]) - end - - @impl true - 
defspan get_all(adapter_meta, keys, opts) do - with_dynamic_cache(adapter_meta, :get_all, [keys, opts]) - end - - @impl true - defspan put(adapter_meta, key, value, _ttl, on_write, opts) do - case with_transaction(adapter_meta, on_write, [key], [key, value, opts], opts) do - :ok -> true - bool -> bool - end - end - - @impl true - defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do - action = if on_write == :put_new, do: :put_new_all, else: :put_all - keys = for {k, _} <- entries, do: k - - with_transaction(adapter_meta, action, keys, [entries, opts], opts) || action == :put_all - end - - @impl true - defspan delete(adapter_meta, key, opts) do - with_transaction(adapter_meta, :delete, [key], [key, opts], opts) - end - - @impl true - defspan take(adapter_meta, key, opts) do - with_transaction(adapter_meta, :take, [key], [key, opts], opts) - end - - @impl true - defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do - with_transaction(adapter_meta, :incr, [key], [key, amount, opts], opts) - end - - @impl true - defspan has_key?(adapter_meta, key) do - with_dynamic_cache(adapter_meta, :has_key?, [key]) - end - - @impl true - defspan ttl(adapter_meta, key) do - with_dynamic_cache(adapter_meta, :ttl, [key]) - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - with_transaction(adapter_meta, :expire, [key], [key, ttl]) - end - - @impl true - defspan touch(adapter_meta, key) do - with_transaction(adapter_meta, :touch, [key], [key]) - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{name: name} = adapter_meta, :delete_all, query, opts) do - # It is blocked until ongoing write operations finish (if there is any). - # Similarly, while it is executed, all later write-like operations are - # blocked until it finishes. 
- :global.trans( - {name, self()}, - fn -> - multi_call(adapter_meta, :delete_all, [query, opts], opts) - end, - Cluster.get_nodes(name) - ) - end - - defp do_execute(adapter_meta, operation, query, opts) do - with_dynamic_cache(adapter_meta, operation, [query, opts]) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - with_dynamic_cache(adapter_meta, :stream, [query, opts]) - end - - ## Nebulex.Adapter.Persistence - - @impl true - defspan dump(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - @impl true - defspan load(adapter_meta, path, opts) do - super(adapter_meta, path, opts) - end - - ## Nebulex.Adapter.Transaction - - @impl true - defspan transaction(adapter_meta, opts, fun) do - super(adapter_meta, Keyword.put(opts, :nodes, Cluster.get_nodes(adapter_meta.name)), fun) - end - - @impl true - defspan in_transaction?(adapter_meta) do - super(adapter_meta) - end - - ## Nebulex.Adapter.Stats - - @impl true - defspan stats(adapter_meta) do - with_dynamic_cache(adapter_meta, :stats, []) - end - - ## Helpers - - @doc """ - Helper function to use dynamic cache for internal primary cache storage - when needed. - """ - def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do - apply(cache.__primary__, action, args) - end - - def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do - cache.__primary__.with_dynamic_cache(primary_name, fn -> - apply(cache.__primary__, action, args) - end) - end - - ## Private Functions - - defp with_transaction(adapter_meta, action, keys, args, opts \\ []) do - do_with_transaction(adapter_meta, action, keys, args, opts, 1) - end - - defp do_with_transaction(%{name: name} = adapter_meta, action, keys, args, opts, times) do - # This is a bit hacky because the `:global_locks` table managed by - # `:global` is being accessed directly breaking the encapsulation. 
- # So far, this has been the simplest and fastest way to validate if - # the global sync lock `:"$sync_lock"` is set, so we block write-like - # operations until it finishes. The other option would be trying to - # lock the same key `:"$sync_lock"`, and then when the lock is acquired, - # delete it before processing the write operation. But this means another - # global lock across the cluster every time there is a write. So for the - # time being, we just read the global table to validate it which is much - # faster; since it is a local read with the global ETS, there is no global - # locks across the cluster. - case :ets.lookup(:global_locks, :"$sync_lock") do - [_] -> - :ok = random_sleep(times) - - do_with_transaction(adapter_meta, action, keys, args, opts, times + 1) - - [] -> - nodes = Cluster.get_nodes(name) - - # Write-like operation must be wrapped within a transaction - # to ensure proper replication - transaction(adapter_meta, [keys: keys, nodes: nodes], fn -> - multi_call(adapter_meta, action, args, opts) - end) - end - end - - defp multi_call(%{name: name, task_sup: task_sup} = meta, action, args, opts) do - # Run the command locally first - local = with_dynamic_cache(meta, action, args) - - # Run the command on the remote nodes - {ok_nodes, error_nodes} = - RPC.multi_call( - task_sup, - Cluster.get_nodes(name) -- [node()], - __MODULE__, - :with_dynamic_cache, - [meta, action, args], - opts - ) - - # Process the responses adding the local one as source of truth - handle_rpc_multi_call({[local | ok_nodes], error_nodes}, meta, action) - end - - defp handle_rpc_multi_call({res, []}, _meta, _action), do: hd(res) - - defp handle_rpc_multi_call({res, {:sanitized, {[], rpc_errors}}}, meta, action) do - _ = dispatch_replication_error(meta, action, rpc_errors) - hd(res) - end - - defp handle_rpc_multi_call({responses, {:sanitized, {errors, rpc_errors}}}, meta, action) do - _ = dispatch_replication_error(meta, action, rpc_errors) - - raise 
Nebulex.RPCMultiCallError, action: action, responses: responses, errors: errors - end - - defp handle_rpc_multi_call({responses, errors}, meta, action) do - handle_rpc_multi_call({responses, {:sanitized, sanitize_errors(errors)}}, meta, action) - end - - defp sanitize_errors(errors) do - Enum.reduce(errors, {[], []}, fn - {{:error, {:exception, %Nebulex.RegistryLookupError{} = error, _}}, node}, {acc1, acc2} -> - # The cache was not found in the node, maybe it was stopped and - # "Process Groups" is not updated yet, then ignore the error - {acc1, [{node, error} | acc2]} - - {{:error, {:erpc, :noconnection}}, node}, {acc1, acc2} -> - # Remote node is down and maybe the "Process Groups" is not updated yet - {acc1, [{node, :noconnection} | acc2]} - - error, {acc1, acc2} -> - {[error | acc1], acc2} - end) - end - - defp dispatch_replication_error(adapter_meta, action, rpc_errors) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:replication], - %{rpc_errors: length(rpc_errors)}, - %{adapter_meta: adapter_meta, function_name: action, rpc_errors: rpc_errors} - ) - end - end - - # coveralls-ignore-start - - defp random_sleep(times) do - _ = - if rem(times, 10) == 0 do - _ = :rand.seed(:exsplus) - end - - # First time 1/4 seconds, then doubling each time up to 8 seconds max - tmax = - if times > 5 do - 8000 - else - div((1 <<< times) * 1000, 8) - end - - tmax - |> :rand.uniform() - |> Process.sleep() - end - - # coveralls-ignore-stop -end - -defmodule Nebulex.Adapters.Replicated.Bootstrap do - @moduledoc false - use GenServer - - import Nebulex.Helpers - - alias Nebulex.{Adapter, Entry, Telemetry} - alias Nebulex.Adapters.Replicated - alias Nebulex.Cache.Cluster - - # Max retries in intervals of 1 ms (5 seconds). - # If in 5 seconds the cache has not started, stop the server. 
- @max_retries 5000 - - ## API - - @doc false - def start_link(%{name: name} = adapter_meta) do - GenServer.start_link( - __MODULE__, - adapter_meta, - name: normalize_module_name([name, Bootstrap]) - ) - end - - ## GenServer Callbacks - - @impl true - def init(adapter_meta) do - # Trap exit signals to run cleanup job - _ = Process.flag(:trap_exit, true) - - # Ensure joining the cluster only when the cache supervision tree is started - :ok = Cluster.join(adapter_meta.name) - - # Set a global lock to stop any write operation - # until the synchronization process finishes - :ok = lock(adapter_meta.name) - - # Init retries - state = Map.put(adapter_meta, :retries, 0) - - # Start bootstrap process - {:ok, state, 1} - end - - @impl true - def handle_info(:timeout, %{pid: pid} = state) when is_pid(pid) do - # Start synchronization process - :ok = sync_data(state) - - # Delete global lock set when the server started - :ok = unlock(state.name) - - # Bootstrap process finished - {:noreply, state} - end - - def handle_info(:timeout, %{name: name, retries: retries} = state) - when retries < @max_retries do - Adapter.with_meta(name, fn _adapter, adapter_meta -> - handle_info(:timeout, adapter_meta) - end) - rescue - ArgumentError -> {:noreply, %{state | retries: retries + 1}, 1} - end - - def handle_info(:timeout, state) do - # coveralls-ignore-start - {:stop, :normal, state} - # coveralls-ignore-stop - end - - @impl true - def terminate(_reason, state) do - # Ensure leaving the cluster when the cache stops - :ok = Cluster.leave(state.name) - end - - ## Helpers - - defp lock(name) do - true = :global.set_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - defp unlock(name) do - true = :global.del_lock({:"$sync_lock", self()}, Cluster.get_nodes(name)) - - :ok - end - - # FIXME: this is because coveralls does not mark this as covered - # coveralls-ignore-start - - defp sync_data(%{name: name} = adapter_meta) do - cluster_nodes = Cluster.get_nodes(name) - - 
case cluster_nodes -- [node()] do - [] -> - :ok - - nodes -> - # Sync process: - # 1. Push a new generation on all cluster nodes to make the newer one - # empty. - # 2. Copy cached data from one of the cluster nodes; entries will be - # stremed from the older generation since the newer one should be - # empty. - # 3. Push a new generation on the current/new node to make it a mirror - # of the other cluster nodes. - # 4. Reset GC timer for ell cluster nodes (making the generation timer - # gap among cluster nodes as small as possible). - with :ok <- maybe_run_on_nodes(adapter_meta, nodes, :new_generation), - :ok <- copy_entries_from_nodes(adapter_meta, nodes), - :ok <- maybe_run_on_nodes(adapter_meta, [node()], :new_generation) do - maybe_run_on_nodes(adapter_meta, nodes, :reset_generation_timer) - end - end - end - - defp maybe_run_on_nodes(%{cache: cache} = adapter_meta, nodes, fun) do - if cache.__primary__.__adapter__() == Nebulex.Adapters.Local do - nodes - |> :rpc.multicall(Replicated, :with_dynamic_cache, [adapter_meta, fun, []]) - |> handle_multicall(adapter_meta) - else - :ok - end - end - - defp handle_multicall({responses, failed_nodes}, adapter_meta) do - {_ok, errors} = Enum.split_with(responses, &(&1 == :ok)) - - dispatch_bootstrap_error( - adapter_meta, - %{failed_nodes: length(failed_nodes), remote_errors: length(errors)}, - %{failed_nodes: failed_nodes, remote_errors: errors} - ) - end - - defp copy_entries_from_nodes(adapter_meta, nodes) do - nodes - |> Enum.reduce_while([], &stream_entries(adapter_meta, &1, &2)) - |> Enum.each( - &Replicated.with_dynamic_cache( - adapter_meta, - :put, - [&1.key, &1.value, [ttl: Entry.ttl(&1)]] - ) - ) - end - - defp stream_entries(meta, node, acc) do - stream_fun = fn -> - meta - |> Replicated.stream(nil, return: :entry, page_size: 100) - |> Stream.filter(&(not Entry.expired?(&1))) - |> Stream.map(& &1) - |> Enum.to_list() - end - - case :rpc.call(node, Kernel, :apply, [stream_fun, []]) do - {:badrpc, _} -> 
{:cont, acc} - entries -> {:halt, entries} - end - end - - defp dispatch_bootstrap_error(adapter_meta, measurements, metadata) do - if adapter_meta.telemetry or Map.get(adapter_meta, :in_span?, false) do - Telemetry.execute( - adapter_meta.telemetry_prefix ++ [:bootstrap], - measurements, - Map.put(metadata, :adapter_meta, adapter_meta) - ) - end - end - - # coveralls-ignore-stop -end diff --git a/lib/nebulex/adapters/supervisor.ex b/lib/nebulex/adapters/supervisor.ex deleted file mode 100644 index e1a3670d..00000000 --- a/lib/nebulex/adapters/supervisor.ex +++ /dev/null @@ -1,19 +0,0 @@ -defmodule Nebulex.Adapters.Supervisor do - # Utility module for building a supervisor to wrap up the adapter's children. - @moduledoc false - - @doc """ - Builds a supervisor spec with the given `options` for wrapping up the - adapter's children. - """ - @spec child_spec(Keyword.t()) :: Supervisor.child_spec() - def child_spec(options) do - {children, options} = Keyword.pop(options, :children, []) - - %{ - id: Keyword.fetch!(options, :name), - start: {Supervisor, :start_link, [children, options]}, - type: :supervisor - } - end -end diff --git a/lib/nebulex/cache.ex b/lib/nebulex/cache.ex index 9cfca475..3e19e01f 100644 --- a/lib/nebulex/cache.ex +++ b/lib/nebulex/cache.ex @@ -7,9 +7,12 @@ defmodule Nebulex.Cache do adapter. For example, Nebulex ships with a default adapter that implements a local generational cache. - When used, the Cache expects the `:otp_app` and `:adapter` as options. - The `:otp_app` should point to an OTP application that has the cache - configuration. 
For example, the Cache: + When used, the defined cache can be configured with the following + compilation time options: + + #{Nebulex.Cache.Options.compile_options_docs()} + + For example, the cache: defmodule MyApp.Cache do use Nebulex.Cache, @@ -20,7 +23,6 @@ defmodule Nebulex.Cache do Could be configured with: config :my_app, MyApp.Cache, - backend: :shards, gc_interval: :timer.hours(12), max_size: 1_000_000, allocated_memory: 2_000_000_000, @@ -33,44 +35,39 @@ defmodule Nebulex.Cache do for more information. In spite of this, the following configuration values are shared across all adapters: - * `:name` - The name of the Cache supervisor process. + #{Nebulex.Cache.Options.start_link_options_docs()} - * `:telemetry_prefix` - It is recommend for adapters to publish events - using the `Telemetry` library. By default, the telemetry prefix is based - on the module name, so if your module is called `MyApp.Cache`, the prefix - will be `[:my_app, :cache]`. See the "Telemetry events" section to see - what events recommended for the adapters to publish.. Note that if you - have multiple caches, you should keep the `:telemetry_prefix` consistent - for each of them and use the `:cache` and/or `:name` (in case of a named - or dynamic cache) properties in the event metadata for distinguishing - between caches. + ## Shared options - * `:telemetry` - An optional flag to tell the adapters whether Telemetry - events should be emitted or not. Defaults to `true`. + All of the cache functions outlined in this module accept the following + options: - * `:stats` - Boolean to define whether or not the cache will provide stats. - Defaults to `false`. Each adapter is responsible for providing stats by - implementing `Nebulex.Adapter.Stats` behaviour. See the "Stats" section - below. 
+  #{Nebulex.Cache.Options.runtime_shared_options_docs()}
 
   ## Telemetry events
 
-  Similar to Ecto or Phoenix, Nebulex also provides built-in Telemetry events
-  applied to all caches, and cache adapter-specific events.
+  There are two types of telemetry events: the ones emitted by Nebulex and the
+  ones that are adapter-specific. The ones emitted by Nebulex are divided into two
+  categories: cache lifecycle events and cache command events. Let us take a
+  closer look at each of them.
 
-  ### Nebulex built-in events
+  ### Cache lifecycle events
 
   The following events are emitted by all Nebulex caches:
 
-  * `[:nebulex, :cache, :init]` - it is dispatched whenever a cache starts.
-    The measurement is a single `system_time` entry in native unit. The
-    metadata is the `:cache` and all initialization options under `:opts`.
+  * `[:nebulex, :cache, :init]` - It is dispatched whenever a cache starts.
+    The only measurement is the current system time in native units from
+    calling: `System.system_time()`. The `:opts` key in the metadata
+    contains all initialization options.
 
-  ### Adapter-specific events
+    * Measurement: `%{system_time: integer()}`
+    * Metadata: `%{cache: module(), name: atom(), opts: keyword()}`
+
+  ### Cache command events
 
-  It is recommend the adapters to publish certain `Telemetry` events listed
-  below. Those events will use the `:telemetry_prefix` outlined above which
-  defaults to `[:my_app, :cache]`.
+  When option `:telemetry` is set to `true` (the default), Nebulex will emit
+  Telemetry span events for each cache command, and those will use the
+  `:telemetry_prefix` outlined above, which defaults to `[:my_app, :cache]`.
 
   For instance, to receive all events published by a cache called
   `MyApp.Cache`, one could define a module:
@@ -113,27 +110,26 @@ defmodule Nebulex.Cache do
 
   #### `[:my_app, :cache, :command, :start]`
 
-  This event should be invoked on every cache call sent to the adapter before
-  the command logic is executed. 
+ This event is emitted just before a cache command is executed. The `:measurements` map will include the following: * `:system_time` - The current system time in native units from calling: `System.system_time()`. - A Telemetry `:metadata` map including the following fields. Each cache adapter - may emit different information here. For built-in adapters, it will contain: + A Telemetry `:metadata` map including the following fields: * `:adapter_meta` - The adapter metadata. - * `:function_name` - The name of the invoked adapter function. - * `:args` - The arguments of the invoked adapter function, omitting the - first argument, since it is the adapter metadata already included into + * `:command` - The name of the invoked adapter's command. + * `:args` - The arguments of the invoked adapter command, omitting the + first argument, since the adapter's metadata is already included in the event's metadata. + * `:extra_metadata` - Additional provided metadata via the runtime option + `:telemetry_metadata`. #### `[:my_app, :cache, :command, :stop]` - This event should be invoked on every cache call sent to the adapter after - the command logic is executed. + This event is emitted after a cache command is executed. The `:measurements` map will include the following: @@ -141,20 +137,21 @@ defmodule Nebulex.Cache do is given in the `:native` time unit. You can read more about it in the docs for `System.convert_time_unit/3`. - A Telemetry `:metadata` map including the following fields. Each cache adapter - may emit different information here. For built-in adapters, it will contain: + A Telemetry `:metadata` map including the following fields: * `:adapter_meta` - The adapter metadata. - * `:function_name` - The name of the invoked adapter function. - * `:args` - The arguments of the invoked adapter function, omitting the - first argument, since it is the adapter metadata already included into + * `:command` - The name of the invoked adapter's command. 
+ * `:args` - The arguments of the invoked adapter command, omitting the + first argument, since the adapter's metadata is already included in the event's metadata. - * `:result` - The command result. + * `:extra_metadata` - Additional provided metadata via the runtime option + `:telemetry_metadata`. + * `:result` - The command's result. #### `[:my_app, :cache, :command, :exception]` - This event should be invoked when an error or exception occurs while executing - the cache command. + This event is emitted when an error or exception occurs during the + cache command execution. The `:measurements` map will include the following: @@ -162,92 +159,86 @@ defmodule Nebulex.Cache do is given in the `:native` time unit. You can read more about it in the docs for `System.convert_time_unit/3`. - A Telemetry `:metadata` map including the following fields. Each cache adapter - may emit different information here. For built-in adapters, it will contain: + A Telemetry `:metadata` map including the following fields: * `:adapter_meta` - The adapter metadata. - * `:function_name` - The name of the invoked adapter function. - * `:args` - The arguments of the invoked adapter function, omitting the - first argument, since it is the adapter metadata already included into + * `:command` - The name of the invoked adapter's command. + * `:args` - The arguments of the invoked adapter command, omitting the + first argument, since the adapter's metadata is already included in the event's metadata. + * `:extra_metadata` - Additional provided metadata via the runtime option + `:telemetry_metadata`. * `:kind` - The type of the error: `:error`, `:exit`, or `:throw`. * `:reason` - The reason of the error. * `:stacktrace` - The stacktrace. - **NOTE:** The events outlined above are the recommended for the adapters - to dispatch. 
However, it is highly recommended to review the used adapter
-  documentation to ensure it is fully compatible with these events, perhaps
-  differences, or perhaps also additional events.
+  ### Adapter-specific events
 
-  ## Stats
+  Regardless of whether Nebulex emits the telemetry events outlined above or
+  not, the adapters can and are free to expose their own, but they will be
+  out of Nebulex's scope. Therefore, if you are interested in using specific
+  adapter events, you should review the adapters' documentation.
 
-  Stats are provided by the adapters by implementing the optional behaviour
-  `Nebulex.Adapter.Stats`. This behaviour exposes a callback to return the
-  current cache stats. Nevertheless, the behaviour brings with a default
-  implementation using [Erlang counters][counters], which is used by the
-  local built-in adapter (`Nebulex.Adapters.Local`).
+  ## Dynamic caches
 
-  [counters]: https://erlang.org/doc/man/counters.html
+  Nebulex allows you to start multiple processes from the same cache module.
+  This is typically useful when you want to have different cache instances
+  but access them through the same cache module.
 
-  One can enable the stats by setting the option `:stats` to `true`.
-  For example, in the configuration file:
+  When you list a cache in your supervision tree, such as `MyApp.Cache`,
+  it will start a supervision tree with a process named `MyApp.Cache`
+  under the hood. By default, the process has the same name as the cache
+  module itself. Hence, every time you invoke a function in `MyApp.Cache`,
+  such as `MyApp.Cache.put/3`, Nebulex will execute the command in the
+  cache process named `MyApp.Cache`.
 
-      config :my_app, MyApp.Cache,
-        stats: true,
-        ...
+  However, with Nebulex you can start multiple processes from the same cache.
+  The only requirement is that they must have different process names, like
+  this:
 
-  > Remember to check if the underlying adapter implements the
-    `Nebulex.Adapter.Stats` behaviour. 
+ children = [ + MyApp.Cache, + {MyApp.Cache, name: MyApp.UsersCache} + ] - See `c:Nebulex.Cache.stats/0` for more information. + Now you have two cache instances running: one is named `MyApp.Cache` and the + other one is named `MyApp.UsersCache`. You can tell Nebulex which process + you want to use in your cache operations by calling: - ## Dispatching stats via Telemetry + MyApp.Cache.put_dynamic_cache(MyApp.Cache) + MyApp.Cache.put_dynamic_cache(MyApp.UsersCache) - It is possible to emit Telemetry events for the current stats via - `c:Nebulex.Cache.dispatch_stats/1`, but it has to be invoked explicitly; - Nebulex does not emit this Telemetry event automatically. But it is very - easy to emit this event using [`:telemetry_poller`][telemetry_poller]. + Once you call `MyApp.Cache.put_dynamic_cache(name)`, all invocations made on + `MyApp.Cache` will use the cache instance denoted by `name`. - [telemetry_poller]: https://github.com/beam-telemetry/telemetry_poller + Nebulex also provides a handy cache function for running commands when using + dynamic caches: `c:with_dynamic_cache/2`. - For example, one can define a custom pollable measurement: + MyApp.Cache.with_dynamic_cache(MyApp.UsersCache, fn -> + # all commands here will use MyApp.UsersCache + MyApp.Cache.put("u1", "joe") + ... + end) - :telemetry_poller.start_link( - measurements: [ - {MyApp.Cache, :dispatch_stats, []}, - ], - # configure sampling period - default is :timer.seconds(5) - period: :timer.seconds(30), - name: :my_cache_stats_poller - ) + While these functions are handy, you may want to have the ability to pass + the dynamic cache directly to the command, avoiding the boilerplate logic + of using `c:put_dynamic_cache/1` or `c:with_dynamic_cache/2`. From **v3.0**, + all Cache API commands expose an extended callback version that admits a + dynamic cache at the first argument, so you can directly interact with a + cache instance. 
- Or you can also start the `:telemetry_poller` process along with your - application supervision tree: - - def start(_type, _args) do - my_cache_stats_poller_opts = [ - measurements: [ - {MyApp.Cache, :dispatch_stats, []}, - ], - period: :timer.seconds(30), - name: :my_cache_stats_poller - ] - - children = [ - {MyApp.Cache, []}, - {:telemetry_poller, my_cache_stats_poller_opts} - ] - - opts = [strategy: :one_for_one, name: MyApp.Supervisor] - Supervisor.start_link(children, opts) - end + MyApp.Cache.put(MyApp.UsersCache, "u1", "joe", ttl: :timer.hours(1)) + MyApp.Cache.get(MyApp.UsersCache, "u1", nil, []) + MyApp.Cache.delete(MyApp.UsersCache, "u1", []) - See [Nebulex Telemetry Guide](http://hexdocs.pm/nebulex/telemetry.html) - for more information. + This is another handy way to work with multiple cache instances through + the same cache module. ## Distributed topologies - Nebulex provides the following adapters for distributed topologies: + One of the goals of Nebulex is also to provide the ability to set up + distributed cache topologies, but this feature will depend on the adapters. + However, there are available adapters already for this: * `Nebulex.Adapters.Partitioned` - Partitioned cache topology. * `Nebulex.Adapters.Replicated` - Replicated cache topology. @@ -256,62 +247,118 @@ defmodule Nebulex.Cache do These adapters work more as wrappers for an existing local adapter and provide the distributed topology on top of it. Optionally, you can set the adapter for the primary cache storage with the option `:primary_storage_adapter`. Defaults - to `Nebulex.Adapters.Local`. + to `Nebulex.Adapters.Local`. See the adapters docs for information. 
""" - @type t :: module + @typedoc "Cache type" + @type t() :: module() @typedoc "Cache entry key" - @type key :: any + @type key() :: any() @typedoc "Cache entry value" - @type value :: any + @type value() :: any() + + @typedoc "Dynamic cache value" + @type dynamic_cache() :: atom() | pid() @typedoc "Cache entries" - @type entries :: map | [{key, value}] + @type entries() :: map() | [{key(), value()}] @typedoc "Cache action options" - @type opts :: Keyword.t() + @type opts() :: keyword() + + @typedoc "The data type for a query" + @type query() :: any() + + @typedoc "Specification key for the item(s) to include in the returned info" + @type info_spec() :: :all | atom() | [atom()] + + @typedoc "The type for the info item's value" + @type info_item() :: any() + + @typedoc "Info map" + @type info_map() :: %{optional(atom()) => any()} + + @typedoc "The data type for the cache information" + @type info_data() :: info_map() | info_item() + + @typedoc "Proxy type for generic Nebulex error" + @type nbx_error_reason() :: Nebulex.Error.t() + + @typedoc "Fetch error reason" + @type fetch_error_reason() :: Nebulex.KeyError.t() | nbx_error_reason() + + @typedoc "Common error type" + @type error_tuple() :: error_tuple(nbx_error_reason()) + + @typedoc "Error type for the given reason" + @type error_tuple(reason) :: {:error, reason} + + @typedoc "Ok/Error tuple with default error reasons" + @type ok_error_tuple(ok) :: ok_error_tuple(ok, nbx_error_reason()) + + @typedoc "Ok/Error type" + @type ok_error_tuple(ok, error) :: {:ok, ok} | {:error, error} + + ## API + + import __MODULE__.Impl @doc false defmacro __using__(opts) do - quote bind_quoted: [opts: opts] do - @behaviour Nebulex.Cache + quote do + unquote(prelude(opts)) + unquote(base_defs()) + unquote(kv_defs()) - alias Nebulex.Cache.{ - Entry, - Persistence, - Queryable, - Stats, - Storage, - Transaction - } + if Nebulex.Adapter.Queryable in behaviours do + unquote(queryable_defs()) + end + + if Nebulex.Adapter.Persistence in 
behaviours do + unquote(persistence_defs()) + end + + if Nebulex.Adapter.Transaction in behaviours do + unquote(transaction_defs()) + end + + if Nebulex.Adapter.Info in behaviours do + unquote(info_defs()) + end + end + end - alias Nebulex.Hook + defp prelude(opts) do + quote do + @behaviour Nebulex.Cache - {otp_app, adapter, behaviours} = Nebulex.Cache.Supervisor.compile_config(opts) + {otp_app, adapter, behaviours, opts} = Nebulex.Cache.Supervisor.compile_config(unquote(opts)) @otp_app otp_app @adapter adapter @opts opts - @default_dynamic_cache opts[:default_dynamic_cache] || __MODULE__ - @default_key_generator opts[:default_key_generator] || Nebulex.Caching.SimpleKeyGenerator + @default_dynamic_cache @opts[:default_dynamic_cache] || __MODULE__ + @before_compile adapter + end + end + defp base_defs do + quote do ## Config and metadata @impl true def config do {:ok, config} = Nebulex.Cache.Supervisor.runtime_config(__MODULE__, @otp_app, []) + config end @impl true def __adapter__, do: @adapter - @impl true - def __default_key_generator__, do: @default_key_generator - ## Process lifecycle @doc false @@ -329,10 +376,16 @@ defmodule Nebulex.Cache do end @impl true - def stop(timeout \\ 5000) do - Supervisor.stop(get_dynamic_cache(), :normal, timeout) + def stop(opts \\ []) do + stop(get_dynamic_cache(), opts) + end + + @impl true + def stop(name, opts) do + Supervisor.stop(name, :normal, Keyword.get(opts, :timeout, 5000)) end + # Iniline common instructions @compile {:inline, get_dynamic_cache: 0} @impl true @@ -351,197 +404,140 @@ defmodule Nebulex.Cache do try do _ = put_dynamic_cache(name) + fun.() after _ = put_dynamic_cache(default_dynamic_cache) end end + end + end - @impl true - def with_dynamic_cache(name, module, fun, args) do - with_dynamic_cache(name, fn -> apply(module, fun, args) end) - end + defp kv_defs do + quote do + alias Nebulex.Cache.KV - ## Entry + defcacheapi fetch(key, opts \\ []), to: KV - @impl true - def get(key, opts \\ []) do - 
Entry.get(get_dynamic_cache(), key, opts) - end + defcacheapi fetch!(key, opts \\ []), to: KV - @impl true - def get!(key, opts \\ []) do - Entry.get!(get_dynamic_cache(), key, opts) - end + defcacheapi get(key, default \\ nil, opts \\ []), to: KV - @impl true - def get_all(keys, opts \\ []) do - Entry.get_all(get_dynamic_cache(), keys, opts) - end + defcacheapi get!(key, default \\ nil, opts \\ []), to: KV - @impl true - def put(key, value, opts \\ []) do - Entry.put(get_dynamic_cache(), key, value, opts) - end + defcacheapi put(key, value, opts \\ []), to: KV - @impl true - def put_new(key, value, opts \\ []) do - Entry.put_new(get_dynamic_cache(), key, value, opts) - end + defcacheapi put!(key, value, opts \\ []), to: KV - @impl true - def put_new!(key, value, opts \\ []) do - Entry.put_new!(get_dynamic_cache(), key, value, opts) - end + defcacheapi put_new(key, value, opts \\ []), to: KV - @impl true - def replace(key, value, opts \\ []) do - Entry.replace(get_dynamic_cache(), key, value, opts) - end + defcacheapi put_new!(key, value, opts \\ []), to: KV - @impl true - def replace!(key, value, opts \\ []) do - Entry.replace!(get_dynamic_cache(), key, value, opts) - end + defcacheapi replace(key, value, opts \\ []), to: KV - @impl true - def put_all(entries, opts \\ []) do - Entry.put_all(get_dynamic_cache(), entries, opts) - end + defcacheapi replace!(key, value, opts \\ []), to: KV - @impl true - def put_new_all(entries, opts \\ []) do - Entry.put_new_all(get_dynamic_cache(), entries, opts) - end + defcacheapi put_all(entries, opts \\ []), to: KV - @impl true - def delete(key, opts \\ []) do - Entry.delete(get_dynamic_cache(), key, opts) - end + defcacheapi put_all!(entries, opts \\ []), to: KV - @impl true - def take(key, opts \\ []) do - Entry.take(get_dynamic_cache(), key, opts) - end + defcacheapi put_new_all(entries, opts \\ []), to: KV - @impl true - def take!(key, opts \\ []) do - Entry.take!(get_dynamic_cache(), key, opts) - end + defcacheapi 
put_new_all!(entries, opts \\ []), to: KV - @impl true - def has_key?(key) do - Entry.has_key?(get_dynamic_cache(), key) - end + defcacheapi delete(key, opts \\ []), to: KV - @impl true - def get_and_update(key, fun, opts \\ []) do - Entry.get_and_update(get_dynamic_cache(), key, fun, opts) - end + defcacheapi delete!(key, opts \\ []), to: KV - @impl true - def update(key, initial, fun, opts \\ []) do - Entry.update(get_dynamic_cache(), key, initial, fun, opts) - end + defcacheapi take(key, opts \\ []), to: KV - @impl true - def incr(key, amount \\ 1, opts \\ []) do - Entry.incr(get_dynamic_cache(), key, amount, opts) - end + defcacheapi take!(key, opts \\ []), to: KV - @impl true - def decr(key, amount \\ 1, opts \\ []) do - Entry.decr(get_dynamic_cache(), key, amount, opts) - end + defcacheapi has_key?(key, opts \\ []), to: KV - @impl true - def ttl(key) do - Entry.ttl(get_dynamic_cache(), key) - end + defcacheapi get_and_update(key, fun, opts \\ []), to: KV - @impl true - def expire(key, ttl) do - Entry.expire(get_dynamic_cache(), key, ttl) - end + defcacheapi get_and_update!(key, fun, opts \\ []), to: KV - @impl true - def touch(key) do - Entry.touch(get_dynamic_cache(), key) - end + defcacheapi update(key, initial, fun, opts \\ []), to: KV - ## Queryable + defcacheapi update!(key, initial, fun, opts \\ []), to: KV - if Nebulex.Adapter.Queryable in behaviours do - @impl true - def all(query \\ nil, opts \\ []) do - Queryable.all(get_dynamic_cache(), query, opts) - end + defcacheapi incr(key, amount \\ 1, opts \\ []), to: KV - @impl true - def count_all(query \\ nil, opts \\ []) do - Queryable.count_all(get_dynamic_cache(), query, opts) - end + defcacheapi incr!(key, amount \\ 1, opts \\ []), to: KV - @impl true - def delete_all(query \\ nil, opts \\ []) do - Queryable.delete_all(get_dynamic_cache(), query, opts) - end + defcacheapi decr(key, amount \\ 1, opts \\ []), to: KV - @impl true - def stream(query \\ nil, opts \\ []) do - 
Queryable.stream(get_dynamic_cache(), query, opts) - end + defcacheapi decr!(key, amount \\ 1, opts \\ []), to: KV - ## Deprecated functions (for backwards compatibility) + defcacheapi ttl(key, opts \\ []), to: KV - @impl true - defdelegate size, to: __MODULE__, as: :count_all + defcacheapi ttl!(key, opts \\ []), to: KV - @impl true - defdelegate flush, to: __MODULE__, as: :delete_all - end + defcacheapi expire(key, ttl, opts \\ []), to: KV - ## Persistence + defcacheapi expire!(key, ttl, opts \\ []), to: KV - if Nebulex.Adapter.Persistence in behaviours do - @impl true - def dump(path, opts \\ []) do - Persistence.dump(get_dynamic_cache(), path, opts) - end + defcacheapi touch(key, opts \\ []), to: KV - @impl true - def load(path, opts \\ []) do - Persistence.load(get_dynamic_cache(), path, opts) - end - end + defcacheapi touch!(key, opts \\ []), to: KV + end + end - ## Transactions + defp queryable_defs do + quote do + alias Nebulex.Cache.Queryable - if Nebulex.Adapter.Transaction in behaviours do - @impl true - def transaction(opts \\ [], fun) do - Transaction.transaction(get_dynamic_cache(), opts, fun) - end + defcacheapi get_all(query \\ nil, opts \\ []), to: Queryable - @impl true - def in_transaction? 
do - Transaction.in_transaction?(get_dynamic_cache()) - end - end + defcacheapi get_all!(query \\ nil, opts \\ []), to: Queryable - ## Stats + defcacheapi count_all(query \\ nil, opts \\ []), to: Queryable - if Nebulex.Adapter.Stats in behaviours do - @impl true - def stats do - Stats.stats(get_dynamic_cache()) - end + defcacheapi count_all!(query \\ nil, opts \\ []), to: Queryable - @impl true - def dispatch_stats(opts \\ []) do - Stats.dispatch_stats(get_dynamic_cache(), opts) - end - end + defcacheapi delete_all(query \\ nil, opts \\ []), to: Queryable + + defcacheapi delete_all!(query \\ nil, opts \\ []), to: Queryable + + defcacheapi stream(query \\ nil, opts \\ []), to: Queryable + + defcacheapi stream!(query \\ nil, opts \\ []), to: Queryable + end + end + + defp persistence_defs do + quote do + alias Nebulex.Cache.Persistence + + defcacheapi dump(path, opts \\ []), to: Persistence + + defcacheapi dump!(path, opts \\ []), to: Persistence + + defcacheapi load(path, opts \\ []), to: Persistence + + defcacheapi load!(path, opts \\ []), to: Persistence + end + end + + defp transaction_defs do + quote do + alias Nebulex.Cache.Transaction + + defcacheapi transaction(fun, opts \\ []), to: Transaction + + defcacheapi in_transaction?(opts \\ []), to: Transaction + end + end + + defp info_defs do + quote do + alias Nebulex.Cache.Info + + defcacheapi info(spec \\ :all, opts \\ []), to: Info + + defcacheapi info!(spec \\ :all, opts \\ []), to: Info end end @@ -552,37 +548,24 @@ defmodule Nebulex.Cache do @doc """ A callback executed when the cache starts or when configuration is read. """ - @callback init(config :: Keyword.t()) :: {:ok, Keyword.t()} | :ignore + @doc group: "User callbacks" + @callback init(config :: keyword) :: {:ok, keyword} | :ignore ## Nebulex.Adapter @doc """ Returns the adapter tied to the cache. 
""" + @doc group: "Runtime API" @callback __adapter__ :: Nebulex.Adapter.t() - @doc """ - Returns the default key generator applied only when using - **"declarative annotation-based caching"** via `Nebulex.Caching.Decorators`. - - Sometimes you may want to set a different key generator when using - declarative caching. By default, the key generator is set to - `Nebulex.Caching.SimpleKeyGenerator`. You can change the default - key generator at compile time with: - - use Nebulex.Cache, default_key_generator: MyKeyGenerator - - See `Nebulex.Caching.Decorators` and `Nebulex.Caching.KeyGenerator` - for more information. - """ - @callback __default_key_generator__ :: Nebulex.Caching.KeyGenerator.t() - @doc """ Returns the adapter configuration stored in the `:otp_app` environment. If the `c:init/1` callback is implemented in the cache, it will be invoked. """ - @callback config() :: Keyword.t() + @doc group: "Runtime API" + @callback config() :: keyword() @doc """ Starts a supervision and return `{:ok, pid}` or just `:ok` if nothing @@ -596,15 +579,36 @@ defmodule Nebulex.Cache do See the configuration in the moduledoc for options shared between adapters, for adapter-specific configuration see the adapter's documentation. """ - @callback start_link(opts) :: - {:ok, pid} - | {:error, {:already_started, pid}} - | {:error, term} + @doc group: "Runtime API" + @callback start_link(opts()) :: + {:ok, pid()} + | {:error, {:already_started, pid()}} + | {:error, any()} @doc """ Shuts down the cache. + + ## Options + + `:timeout` - It is an integer that specifies how many milliseconds to wait + for the cache supervisor process to terminate, or the atom `:infinity` to + wait indefinitely. Defaults to `5000`. See `Supervisor.stop/3`. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ """ + @doc group: "Runtime API" + @callback stop(opts()) :: :ok + + @doc """ + Same as `c:stop/1` but stops the cache instance given in the first argument + `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. """ - @callback stop(timeout) :: :ok + @doc group: "Runtime API" + @callback stop(dynamic_cache(), opts()) :: :ok @doc """ Returns the atom name or pid of the current cache @@ -612,34 +616,33 @@ defmodule Nebulex.Cache do See also `c:put_dynamic_cache/1`. """ - @callback get_dynamic_cache() :: atom() | pid() + @doc group: "Runtime API" + @callback get_dynamic_cache() :: dynamic_cache() @doc """ Sets the dynamic cache to be used in further commands (based on Ecto dynamic repo). - There might be cases where we want to have different cache instances but - accessing them through the same cache module. By default, when you call + There are cases where you may want to have different cache instances but + access them through the same cache module. By default, when you call `MyApp.Cache.start_link/1`, it will start a cache with the name `MyApp.Cache`. But it is also possible to start multiple caches by using a different name for each of them: MyApp.Cache.start_link(name: :cache1) - MyApp.Cache.start_link(name: :cache2, backend: :shards) + MyApp.Cache.start_link(name: :cache2) You can also start caches without names by explicitly setting the name to `nil`: - MyApp.Cache.start_link(name: nil, backend: :shards) + MyApp.Cache.start_link(name: nil) > **NOTE:** There may be adapters requiring the `:name` option anyway, therefore, it is highly recommended to see the adapter's documentation you want to use. - However, once the cache is started, it is not possible to interact directly - with it, since all operations through `MyApp.Cache` are sent by default to - the cache named `MyApp.Cache`. 
But you can change the default cache at
-  compile-time:
+  All operations through `MyApp.Cache` are sent by default to the cache named
+  `MyApp.Cache`. But you can change the default cache at compile-time:

       use Nebulex.Cache, default_dynamic_cache: :cache_name

@@ -649,8 +652,15 @@ defmodule Nebulex.Cache do
   From this moment on, all future commands performed by the current process
   will run on `:another_cache_name`.
+
+  Additionally, all cache commands optionally support passing the wanted
+  dynamic cache (name or PID) as the first argument so you can directly
+  interact with a cache instance. See the
+  ["Dynamic caches"](#module-dynamic-caches) section at the module
+  documentation for more information.
   """
-  @callback put_dynamic_cache(atom() | pid()) :: atom() | pid()
+  @doc group: "Runtime API"
+  @callback put_dynamic_cache(dynamic_cache()) :: dynamic_cache()

   @doc """
   Invokes the given function `fun` for the dynamic cache `name_or_pid`.

@@ -663,78 +673,132 @@ defmodule Nebulex.Cache do
   See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`.
   """
-  @callback with_dynamic_cache(name_or_pid :: atom() | pid(), fun) :: term
+  @doc group: "Runtime API"
+  @callback with_dynamic_cache(dynamic_cache(), fun()) :: any()
+
+  ## Nebulex.Adapter.KV

   @doc """
-  For the dynamic cache `name_or_pid`, invokes the given function name `fun`
-  from `module` with the list of arguments `args`.
+  Fetches the value for a specific `key` in the cache.
+
+  If the cache contains the given `key`, then its value is returned
+  in the shape of `{:ok, value}`.
+
+  If there's an error with executing the command, `{:error, reason}`
+  is returned. `reason` is the cause of the error and can be
+  `Nebulex.KeyError` if the cache does not contain `key`,
+  `Nebulex.Error` otherwise.
+
+  ## Options
+
+  See the ["Shared options"](#module-shared-options) section at the module
+  documentation for more options.
## Example - MyCache.with_dynamic_cache(:my_cache, Module, :some_fun, ["foo", "bar"]) + iex> MyCache.put("foo", "bar") + :ok + iex> MyCache.fetch("foo") + {:ok, "bar"} + + iex> {:error, %Nebulex.KeyError{key: "bar"} = e} = MyCache.fetch("bar") + iex> e.reason + :not_found - See `c:get_dynamic_cache/0` and `c:put_dynamic_cache/1`. """ - @callback with_dynamic_cache( - name_or_pid :: atom() | pid(), - module, - fun :: atom, - args :: [term] - ) :: term - - ## Nebulex.Adapter.Entry + @doc group: "KV API" + @callback fetch(key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) @doc """ - Gets a value from Cache where the key matches the given `key`. - - Returns `nil` if no result was found. + Same as `c:fetch/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - See the configured adapter documentation for runtime options. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Example - iex> MyCache.put("foo", "bar") - :ok + MyCache.fetch(MyCache1, "key", []) - iex> MyCache.get("foo") - "bar" + """ + @doc group: "KV API" + @callback fetch(dynamic_cache(), key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) - iex> MyCache.get(:non_existent_key) - nil + @doc """ + Same as `c:fetch/2` but raises `Nebulex.KeyError` if the cache doesn't + contain `key`, or `Nebulex.Error` if any other error occurs while executing + the command. + """ + @doc group: "KV API" + @callback fetch!(key(), opts()) :: value() + @doc """ + Same as `c:fetch/3` but raises `Nebulex.KeyError` if the cache doesn't + contain `key`, or `Nebulex.Error` if any other error occurs while executing + the command. """ - @callback get(key, opts) :: value + @doc group: "KV API" + @callback fetch!(dynamic_cache(), key(), opts()) :: value() @doc """ - Similar to `c:get/2` but raises `KeyError` if `key` is not found. + Gets a value from cache where the key matches the given `key`. 
+ + If the cache contains the given `key`, then its value is returned + in the shape of `{:ok, value}`. + + If the cache does not contain `key`, `{:ok, default}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options - See the configured adapter documentation for runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example - MyCache.get!(:a) + iex> MyCache.put("foo", "bar") + :ok + iex> MyCache.get("foo") + {:ok, "bar"} + + iex> MyCache.get(:inexistent) + {:ok, nil} + + iex> MyCache.get(:inexistent, :default) + {:ok, :default} """ - @callback get!(key, opts) :: value + @doc group: "KV API" + @callback get(key(), default :: value(), opts()) :: ok_error_tuple(value()) @doc """ - Returns a `map` with all the key-value pairs in the Cache where the key - is in `keys`. + Same as `c:get/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - If `keys` contains keys that are not in the Cache, they're simply ignored. - - See the configured adapter documentation for runtime options. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Example - iex> MyCache.put_all([a: 1, c: 3]) - :ok + MyCache.get(MyCache1, "key", nil, []) - iex> MyCache.get_all([:a, :b, :c]) - %{a: 1, c: 3} + """ + @doc group: "KV API" + @callback get(dynamic_cache(), key(), default :: value(), opts()) :: ok_error_tuple(value()) + + @doc """ + Same as `c:get/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback get!(key(), default :: value(), opts()) :: value() + @doc """ + Same as `c:get!/4` but raises an exception if an error occurs. 
""" - @callback get_all(keys :: [key], opts) :: map + @doc group: "KV API" + @callback get!(dynamic_cache(), key(), default :: value(), opts()) :: value() @doc """ Puts the given `value` under `key` into the Cache. @@ -743,747 +807,1182 @@ defmodule Nebulex.Cache do time to live associated with the key is discarded on successful `put` operation. - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + Returns `:ok` if successful, `{:error, reason}` otherwise. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example iex> MyCache.put("foo", "bar") :ok - If the value is nil, then it is not stored (operation is skipped): - - iex> MyCache.put("foo", nil) - :ok - - Put key with time-to-live: + Putting entries with specific time-to-live: iex> MyCache.put("foo", "bar", ttl: 10_000) :ok - Using Nebulex.Time for TTL: - iex> MyCache.put("foo", "bar", ttl: :timer.hours(1)) :ok iex> MyCache.put("foo", "bar", ttl: :timer.minutes(1)) :ok - iex> MyCache.put("foo", "bar", ttl: :timer.seconds(1)) + iex> MyCache.put("foo", "bar", ttl: :timer.seconds(30)) :ok """ - @callback put(key, value, opts) :: :ok + @doc group: "KV API" + @callback put(key(), value(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:put/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. 
+ + ## Example + + MyCache.put(MyCache1, "foo", "bar", []) + + MyCache.put(MyCache2, "foo", "bar", ttl: :timer.hours(1)) + + """ + @doc group: "KV API" + @callback put(dynamic_cache(), key(), value(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:put/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put!(key(), value(), opts()) :: :ok + + @doc """ + Same as `c:put!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put!(dynamic_cache(), key(), value(), opts()) :: :ok @doc """ - Puts the given `entries` (key/value pairs) into the Cache. It replaces + Puts the given `entries` (key/value pairs) into the cache. It replaces existing values with new values (just as regular `put`). + Returns `:ok` if successful, `{:error, reason}` otherwise. + ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example iex> MyCache.put_all(apples: 3, bananas: 1) :ok - iex> MyCache.put_all(%{apples: 2, oranges: 1}, ttl: 10_000) + iex> MyCache.put_all(%{apples: 2, oranges: 1}, ttl: :timer.hours(1)) :ok - Ideally, this operation should be atomic, so all given keys are put at once. - But it depends purely on the adapter's implementation and the backend used - internally by the adapter. Hence, it is recommended to review the adapter's - documentation. + **NOTE:** Ideally, this operation should be atomic, so all given keys are + put at once. But it depends purely on the adapter's implementation and the + backend used internally by the adapter. Hence, it is recommended to review + the adapter's documentation. 
+ """ + @doc group: "KV API" + @callback put_all(entries(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:put_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Example + + MyCache.put_all(MyCache1, [apples: 3, bananas: 1], []) + + MyCache.put_all(MyCache1, %{apples: 2, oranges: 1}, ttl: :timer.hours(1)) + + """ + @doc group: "KV API" + @callback put_all(dynamic_cache(), entries(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:put_all/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_all!(entries(), opts()) :: :ok + + @doc """ + Same as `c:put_all!/3` but raises an exception if an error occurs. """ - @callback put_all(entries, opts) :: :ok + @doc group: "KV API" + @callback put_all!(dynamic_cache(), entries(), opts()) :: :ok @doc """ Puts the given `value` under `key` into the cache, only if it does not already exist. - Returns `true` if a value was set, otherwise, `false` is returned. + Returns `{:ok, true}` if a value was set, otherwise, `{:ok, false}` + is returned. - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Example

       iex> MyCache.put_new("foo", "bar")
-      true
-
-      iex> MyCache.put_new("foo", "bar")
-      false
-
-  If the value is nil, it is not stored (operation is skipped):
-
-      iex> MyCache.put_new("other", nil)
-      true
+      {:ok, true}
+      iex> MyCache.put_new("foo", "bar", ttl: :timer.hours(1))
+      {:ok, false}

   """
-  @callback put_new(key, value, opts) :: boolean
+  @doc group: "KV API"
+  @callback put_new(key(), value(), opts()) :: ok_error_tuple(boolean())

   @doc """
-  Similar to `c:put_new/3` but raises `Nebulex.KeyAlreadyExistsError` if the
-  key already exists.
+  Same as `c:put_new/3`, but the command is executed on the cache instance
+  given at the first argument `dynamic_cache`.

-  See `c:put_new/3` for general considerations and options.
+  See the ["Dynamic caches"](#module-dynamic-caches) section at the
+  module documentation for more information.

   ## Example

-      iex> MyCache.put_new!("foo", "bar")
-      true
+      MyCache.put_new(MyCache1, "foo", "bar", [])
+
+      MyCache.put_new(MyCache1, "foo", "bar", ttl: :timer.hours(1))
+
+  """
+  @doc group: "KV API"
+  @callback put_new(dynamic_cache(), key(), value(), opts()) :: ok_error_tuple(boolean())
+
+  @doc """
+  Same as `c:put_new/3` but raises an exception if an error occurs.
+  """
+  @doc group: "KV API"
+  @callback put_new!(key(), value(), opts()) :: boolean()

+  @doc """
+  Same as `c:put_new!/4` but raises an exception if an error occurs.
   """
-  @callback put_new!(key, value, opts) :: true
+  @doc group: "KV API"
+  @callback put_new!(dynamic_cache(), key(), value(), opts()) :: boolean()

   @doc """
   Puts the given `entries` (key/value pairs) into the `cache`. It will not
   perform any operation at all even if just a single key already exists.

-  Returns `true` if all entries were successfully set. It returns `false`
+  Returns `{:ok, true}` if all entries were successfully set, or `{:ok, false}`
   if no key was set (at least one key already existed).
+ If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example iex> MyCache.put_new_all(apples: 3, bananas: 1) - true + {:ok, true} + iex> MyCache.put_new_all(%{apples: 3, oranges: 1}, ttl: :timer.hours(1)) + {:ok, false} + + **NOTE:** Ideally, this operation should be atomic, so all given keys are + put at once. But it depends purely on the adapter's implementation and the + backend used internally by the adapter. Hence, it is recommended to review + the adapter's documentation. + """ + @doc group: "KV API" + @callback put_new_all(entries(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. - iex> MyCache.put_new_all(%{apples: 3, oranges: 1}, ttl: 10_000) - false + ## Example + + MyCache.put_new_all(MyCache1, [apples: 3, bananas: 1], []) + + MyCache.put_new_all(MyCache1, %{apples: 3, oranges: 1}, ttl: 10_000) + + """ + @doc group: "KV API" + @callback put_new_all(dynamic_cache(), entries(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:put_new_all/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback put_new_all!(entries(), opts()) :: boolean() - Ideally, this operation should be atomic, so all given keys are put at once. 
- But it depends purely on the adapter's implementation and the backend used - internally by the adapter. Hence, it is recommended to review the adapter's - documentation. + @doc """ + Same as `c:put_new_all!/3` but raises an exception if an error occurs. """ - @callback put_new_all(entries, opts) :: boolean + @doc group: "KV API" + @callback put_new_all!(dynamic_cache(), entries(), opts()) :: boolean() @doc """ Alters the entry stored under `key`, but only if the entry already exists into the Cache. - Returns `true` if a value was set, otherwise, `false` is returned. + Returns `{:ok, true}` if a value was set, otherwise, `{:ok, false}` + is returned. - By default, `nil` values are skipped, which means they are not stored; - the call to the adapter is bypassed. + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. ## Options - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example iex> MyCache.replace("foo", "bar") - false - + {:ok, false} iex> MyCache.put_new("foo", "bar") - true - + {:ok, true} iex> MyCache.replace("foo", "bar2") - true + {:ok, true} Update current value and TTL: iex> MyCache.replace("foo", "bar3", ttl: 10_000) - true + {:ok, true} """ - @callback replace(key, value, opts) :: boolean + @doc group: "KV API" + @callback replace(key(), value(), opts()) :: ok_error_tuple(boolean()) @doc """ - Similar to `c:replace/3` but raises `KeyError` if `key` is not found. + Same as `c:replace/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. 
- See `c:replace/3` for general considerations and options. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Example - iex> MyCache.replace!("foo", "bar") - true + MyCache.replace(MyCache1, "foo", "bar", []) + + MyCache.replace(MyCache1, "foo", "bar", ttl: :timer.hours(1)) """ - @callback replace!(key, value, opts) :: true + @doc group: "KV API" + @callback replace(dynamic_cache(), key(), value(), opts()) :: ok_error_tuple(boolean()) @doc """ - Deletes the entry in Cache for a specific `key`. + Same as `c:replace/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback replace!(key(), value(), opts()) :: boolean() - See the configured adapter documentation for runtime options. + @doc """ + Same as `c:replace!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback replace!(dynamic_cache(), key(), value(), opts()) :: boolean() - ## Example + @doc """ + Deletes the entry in cache for a specific `key`. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example iex> MyCache.put(:a, 1) :ok - iex> MyCache.delete(:a) :ok - - iex> MyCache.get(:a) + iex> MyCache.get!(:a) nil - iex> MyCache.delete(:non_existent_key) + iex> MyCache.delete(:inexistent) :ok """ - @callback delete(key, opts) :: :ok + @doc group: "KV API" + @callback delete(key(), opts()) :: :ok | error_tuple() @doc """ - Returns and removes the value associated with `key` in the Cache. - If the `key` does not exist, then `nil` is returned. + Same as `c:delete/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - If `key` is `nil`, the call to the adapter is bypassed, and `nil` is returned. 
+ See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. - See the configured adapter documentation for runtime options. - - ## Examples - - iex> MyCache.put(:a, 1) - :ok + ## Example - iex> MyCache.take(:a) - 1 + iex> MyCache.delete(MyCache1, :a, []) - iex> MyCache.take(:a) - nil + """ + @doc group: "KV API" + @callback delete(dynamic_cache(), key(), opts()) :: :ok | error_tuple() + @doc """ + Same as `c:delete/2` but raises an exception if an error occurs. """ - @callback take(key, opts) :: value + @doc group: "KV API" + @callback delete!(key(), opts()) :: :ok @doc """ - Similar to `c:take/2` but raises `KeyError` if `key` is not found. + Same as `c:delete!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback delete!(dynamic_cache(), key(), opts()) :: :ok - See `c:take/2` for general considerations and options. + @doc """ + Removes and returns the value associated with `key` in the cache. - ## Example + If `key` is present in the cache, its value is removed and then returned + in the shape of `{:ok, value}`. - MyCache.take!(:a) + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. - """ - @callback take!(key, opts) :: value + ## Options - @doc """ - Returns whether the given `key` exists in the Cache. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples iex> MyCache.put(:a, 1) :ok + iex> MyCache.take(:a) + {:ok, 1} - iex> MyCache.has_key?(:a) - true - - iex> MyCache.has_key?(:b) - false + iex> {:error, %Nebulex.KeyError{key: :a} = e} = MyCache.take(:a) + iex> e.reason + :not_found """ - @callback has_key?(key) :: boolean + @doc group: "KV API" + @callback take(key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) @doc """ - Gets the value from `key` and updates it, all in one pass. + Same as `c:take/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - `fun` is called with the current cached value under `key` (or `nil` if `key` - hasn't been cached) and must return a two-element tuple: the current value - (the retrieved value, which can be operated on before being returned) and - the new value to be stored under `key`. `fun` may also return `:pop`, which - means the current value shall be removed from Cache and returned. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. - The returned value is a tuple with the current value returned by `fun` and - the new updated value under `key`. + ## Examples - ## Options + MyCache.take(MyCache1, :a, []) - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + """ + @doc group: "KV API" + @callback take(dynamic_cache(), key(), opts()) :: ok_error_tuple(value(), fetch_error_reason()) - See the configured adapter documentation for more runtime options. + @doc """ + Same as `c:take/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback take!(key(), opts()) :: value() - ## Examples + @doc """ + Same as `c:take!/3` but raises an exception if an error occurs. 
+ """ + @doc group: "KV API" + @callback take!(dynamic_cache(), key(), opts()) :: value() - Update nonexistent key: + @doc """ + Determines if the cache contains an entry for the specified `key`. - iex> MyCache.get_and_update(:a, fn current_value -> - ...> {current_value, "value!"} - ...> end) - {nil, "value!"} + More formally, returns `{:ok, true}` if the cache contains the given `key`. + If the cache doesn't contain `key`, `{:ok, :false}` is returned. - Update existing key: + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - iex> MyCache.get_and_update(:a, fn current_value -> - ...> {current_value, "new value!"} - ...> end) - {"value!", "new value!"} + ## Options - Pop/remove value if exist: + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. - iex> MyCache.get_and_update(:a, fn _ -> :pop end) - {"new value!", nil} + ## Examples - Pop/remove nonexistent key: + iex> MyCache.put(:a, 1) + :ok + iex> MyCache.has_key?(:a) + {:ok, true} - iex> MyCache.get_and_update(:b, fn _ -> :pop end) - {nil, nil} + iex> MyCache.has_key?(:b) + {:ok, false} """ - @callback get_and_update(key, (value -> {current_value, new_value} | :pop), opts) :: - {current_value, new_value} - when current_value: value, new_value: value + @doc group: "KV API" + @callback has_key?(key(), opts()) :: ok_error_tuple(boolean()) @doc """ - Updates the cached `key` with the given function. - - If `key` is present in Cache with value `value`, `fun` is invoked with - argument `value` and its result is used as the new value of `key`. + Same as `c:has_key?/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - If `key` is not present in Cache, `initial` is inserted as the value of `key`. - The initial value will not be passed through the update function. 
- - ## Options - - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. - - See the configured adapter documentation for more runtime options. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Examples - iex> MyCache.update(:a, 1, &(&1 * 2)) - 1 - - iex> MyCache.update(:a, 1, &(&1 * 2)) - 2 + MyCache.has_key?(MyCache1, :a, []) """ - @callback update(key, initial :: value, (value -> value), opts) :: value + @doc group: "KV API" + @callback has_key?(dynamic_cache(), key(), opts()) :: ok_error_tuple(boolean()) @doc """ - Increments the counter stored at `key` by the given `amount`. + Increments the counter stored at `key` by the given `amount`, and returns + the current count in the shape of `{:ok, count}`. If `amount < 0` (negative), the value is decremented by that `amount` instead. - ## Options + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + ## Options - * `:default` - If `key` is not present in Cache, the default value is - inserted as initial value of key before the it is incremented. - Defaults to `0`. + #{Nebulex.Cache.Options.update_counter_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples iex> MyCache.incr(:a) - 1 - + {:ok, 1} iex> MyCache.incr(:a, 2) - 3 - + {:ok, 3} iex> MyCache.incr(:a, -1) - 2 + {:ok, 2} iex> MyCache.incr(:missing_key, 2, default: 10) - 12 + {:ok, 12} """ - @callback incr(key, amount :: integer, opts) :: integer + @doc group: "KV API" + @callback incr(key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) @doc """ - Decrements the counter stored at `key` by the given `amount`. + Same as `c:incr/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.incr(MyCache1, :a, 1, []) + + """ + @doc group: "KV API" + @callback incr(dynamic_cache(), key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:incr/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback incr!(key(), amount :: integer(), opts()) :: integer() + + @doc """ + Same as `c:incr!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback incr!(dynamic_cache(), key(), amount :: integer(), opts()) :: integer() + + @doc """ + Decrements the counter stored at `key` by the given `amount`, and returns + the current count in the shape of `{:ok, count}`. If `amount < 0` (negative), the value is incremented by that `amount` instead (opposite to `incr/3`). - ## Options + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - * `:ttl` - (positive integer or `:infinity`) Defines the time-to-live - (or expiry time) for the given key in **milliseconds**. Defaults - to `:infinity`. + ## Options - * `:default` - If `key` is not present in Cache, the default value is - inserted as initial value of key before the it is incremented. - Defaults to `0`. 
+ #{Nebulex.Cache.Options.update_counter_options_docs()} - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples iex> MyCache.decr(:a) - -1 - + {:ok, -1} iex> MyCache.decr(:a, 2) - -3 - + {:ok, -3} iex> MyCache.decr(:a, -1) - -2 + {:ok, -2} iex> MyCache.decr(:missing_key, 2, default: 10) - 8 + {:ok, 8} """ - @callback decr(key, amount :: integer, opts) :: integer + @doc group: "KV API" + @callback decr(key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) @doc """ - Returns the remaining time-to-live for the given `key`. If the `key` does not - exist, then `nil` is returned. + Same as `c:decr/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.decr(MyCache1, :a, 1, []) + + """ + @doc group: "KV API" + @callback decr(dynamic_cache(), key(), amount :: integer(), opts()) :: ok_error_tuple(integer()) + + @doc """ + Same as `c:decr/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback decr!(key(), amount :: integer(), opts()) :: integer() + + @doc """ + Same as `c:decr!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback decr!(dynamic_cache(), key(), amount :: integer(), opts()) :: integer() + + @doc """ + Returns the remaining time-to-live for the given `key`. + + If `key` is present in the cache, then its remaining TTL is returned + in the shape of `{:ok, ttl}`. + + If there's an error with executing the command, `{:error, reason}` + is returned. `reason` is the cause of the error and can be + `Nebulex.KeyError` if the cache does not contain `key`, + `Nebulex.Error` otherwise. 
+ + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples iex> MyCache.put(:a, 1, ttl: 5000) :ok - iex> MyCache.put(:b, 2) :ok - iex> MyCache.ttl(:a) - _remaining_ttl - + {:ok, _remaining_ttl} iex> MyCache.ttl(:b) - :infinity + {:ok, :infinity} - iex> MyCache.ttl(:c) - nil + iex> {:error, %Nebulex.KeyError{key: :c} = e} = MyCache.ttl(:c) + iex> e.reason + :not_found """ - @callback ttl(key) :: timeout | nil + @doc group: "KV API" + @callback ttl(key(), opts()) :: ok_error_tuple(timeout(), fetch_error_reason()) @doc """ - Returns `true` if the given `key` exists and the new `ttl` was successfully - updated, otherwise, `false` is returned. + Same as `c:ttl/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.ttl(MyCache1, :a, []) + + """ + @doc group: "KV API" + @callback ttl(dynamic_cache(), key(), opts()) :: ok_error_tuple(timeout(), fetch_error_reason()) + + @doc """ + Same as `c:ttl/2` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback ttl!(key(), opts()) :: timeout() + + @doc """ + Same as `c:ttl!/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback ttl!(dynamic_cache(), key(), opts()) :: timeout() + + @doc """ + Returns `{:ok, true}` if the given `key` exists and the new `ttl` was + successfully updated, otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples iex> MyCache.put(:a, 1) :ok + iex> MyCache.expire(:a, :timer.hours(1)) + {:ok, true} + iex> MyCache.expire(:a, :infinity) + {:ok, true} - iex> MyCache.expire(:a, 5) - true + iex> MyCache.expire(:b, 5) + {:ok, false} - iex> MyCache.expire(:a, :infinity) - true + """ + @doc group: "KV API" + @callback expire(key(), ttl :: timeout(), opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:expire/3`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.expire(MyCache1, :a, :timer.hours(1), []) + + """ + @doc group: "KV API" + @callback expire(dynamic_cache(), key(), ttl :: timeout(), opts()) :: ok_error_tuple(boolean()) - iex> MyCache.ttl(:b, 5) - false + @doc """ + Same as `c:expire/3` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback expire!(key(), ttl :: timeout(), opts()) :: boolean() + @doc """ + Same as `c:expire!/4` but raises an exception if an error occurs. """ - @callback expire(key, ttl :: timeout) :: boolean + @doc group: "KV API" + @callback expire!(dynamic_cache(), key(), ttl :: timeout(), opts()) :: boolean() @doc """ - Returns `true` if the given `key` exists and the last access time was - successfully updated, otherwise, `false` is returned. + Returns `{:ok, true}` if the given `key` exists and the last access time was + successfully updated, otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
   ## Examples
 
       iex> MyCache.put(:a, 1)
       :ok
-
       iex> MyCache.touch(:a)
-      true
+      {:ok, true}
 
       iex> MyCache.touch(:b)
-      false
+      {:ok, false}
 
   """
-  @callback touch(key) :: boolean
+  @doc group: "KV API"
+  @callback touch(key(), opts()) :: ok_error_tuple(boolean())
+
+  @doc """
+  Same as `c:touch/2`, but the command is executed on the cache instance
+  given at the first argument `dynamic_cache`.
+
+  See the ["Dynamic caches"](#module-dynamic-caches) section at the
+  module documentation for more information.
 
-  ## Deprecated Callbacks
+  ## Examples
+
+      MyCache.touch(MyCache1, :a, [])
+
+  """
+  @doc group: "KV API"
+  @callback touch(dynamic_cache(), key(), opts()) :: ok_error_tuple(boolean())
 
   @doc """
-  Returns the total number of cached entries.
+  Same as `c:touch/2` but raises an exception if an error occurs.
+  """
+  @doc group: "KV API"
+  @callback touch!(key(), opts()) :: boolean()
+
+  @doc """
+  Same as `c:touch!/3` but raises an exception if an error occurs.
+  """
+  @doc group: "KV API"
+  @callback touch!(dynamic_cache(), key(), opts()) :: boolean()
+
+  @doc """
+  Gets the value from `key` and updates it, all in one pass.
+
+  `fun` is called with the current cached value under `key` (or `nil` if `key`
+  hasn't been cached) and must return a two-element tuple: the current value
+  (the retrieved value, which can be operated on before being returned) and
+  the new value to be stored under `key`. `fun` may also return `:pop`, which
+  means the current value shall be removed from Cache and returned.
+
+  This function returns:
+
+    * `{:ok, {current_value, new_value}}` - The `current_value` is the current
+      cached value and `new_value` the updated one returned by `fun`.
+
+    * `{:error, reason}` - An error occurred executing the command.
+      `reason` is the cause of the error.
+
+  ## Options
+
+  #{Nebulex.Cache.Options.runtime_common_write_options_docs()}
+
+  See the ["Shared options"](#module-shared-options) section at the module
+  documentation for more options.
## Examples - iex> :ok = Enum.each(1..10, &MyCache.put(&1, &1)) - iex> MyCache.size() - 10 + Update nonexistent key: + + iex> MyCache.get_and_update(:a, fn current_value -> + ...> {current_value, "value!"} + ...> end) + {:ok, {nil, "value!"}} + + Update existing key: + + iex> MyCache.get_and_update(:a, fn current_value -> + ...> {current_value, "new value!"} + ...> end) + {:ok, {"value!", "new value!"}} + + Pop/remove value if exist: + + iex> MyCache.get_and_update(:a, fn _ -> :pop end) + {:ok, {"new value!", nil}} + + Pop/remove nonexistent key: - iex> :ok = Enum.each(1..5, &MyCache.delete(&1)) - iex> MyCache.size() - 5 + iex> MyCache.get_and_update(:b, fn _ -> :pop end) + {:ok, {nil, nil}} """ - @doc deprecated: "Use count_all/2 instead" - @callback size() :: integer + @doc group: "KV API" + @callback get_and_update(key(), (value() -> {current_value, new_value} | :pop), opts()) :: + ok_error_tuple({current_value, new_value}) + when current_value: value(), new_value: value() @doc """ - Flushes the cache and returns the number of evicted keys. + Same as `c:get_and_update/3`, but the command is executed on the cache + instance given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Examples - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1)) - iex> MyCache.flush() - 5 + MyCache.get_and_update(MyCache1, :a, &{&1, "value!"}, []) - iex> MyCache.size() - 0 + """ + @doc group: "KV API" + @callback get_and_update( + dynamic_cache(), + key(), + (value() -> {current_value, new_value} | :pop), + opts() + ) :: ok_error_tuple({current_value, new_value}) + when current_value: value(), new_value: value() + @doc """ + Same as `c:get_and_update/3` but raises an exception if an error occurs. 
""" - @doc deprecated: "Use delete_all/2 instead" - @callback flush() :: integer + @doc group: "KV API" + @callback get_and_update!(key(), (value() -> {current_value, new_value} | :pop), opts()) :: + {current_value, new_value} + when current_value: value(), new_value: value() + + @doc """ + Same as `c:get_and_update!/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback get_and_update!( + dynamic_cache(), + key(), + (value() -> {current_value, new_value} | :pop), + opts() + ) :: {current_value, new_value} + when current_value: value(), new_value: value() + + @doc """ + Updates the cached `key` with the given function. + + If` key` is present in the cache, then the existing value is passed to `fun` + and its result is used as the updated value of `key`. If `key` is not present + in the cache, `default` is inserted as the value of `key`. The default value + will not be passed through the update function. + + This function returns: + + * `{:ok, value}` - The value associated to the given `key` has been updated. + + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. + + ## Options + + #{Nebulex.Cache.Options.runtime_common_write_options_docs()} + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + iex> MyCache.update(:a, 1, &(&1 * 2)) + {:ok, 1} + iex> MyCache.update(:a, 1, &(&1 * 2)) + {:ok, 2} + + """ + @doc group: "KV API" + @callback update(key(), initial :: value(), (value() -> value()), opts()) :: + ok_error_tuple(value()) + + @doc """ + Same as `c:update/4`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. 
+ + ## Examples + + MyCache.update(MyCache1, :a, 1, &(&1 * 2), []) + + """ + @doc group: "KV API" + @callback update(dynamic_cache(), key(), initial :: value(), (value() -> value()), opts()) :: + ok_error_tuple(value()) + + @doc """ + Same as `c:update/4` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback update!(key(), initial :: value(), (value() -> value()), opts()) :: value() + + @doc """ + Same as `c:update!/5` but raises an exception if an error occurs. + """ + @doc group: "KV API" + @callback update!(dynamic_cache(), key(), initial :: value(), (value() -> value()), opts()) :: + value() ## Nebulex.Adapter.Queryable - @optional_callbacks all: 2, count_all: 2, delete_all: 2, stream: 2 + @optional_callbacks get_all: 2, + get_all: 3, + get_all!: 2, + get_all!: 3, + count_all: 2, + count_all: 3, + count_all!: 2, + count_all!: 3, + delete_all: 2, + delete_all: 3, + delete_all!: 2, + delete_all!: 3, + stream: 2, + stream: 3, + stream!: 2, + stream!: 3 @doc """ - Fetches all entries from cache matching the given `query`. + Queries the entries in the cache matching with the given `query`. - May raise `Nebulex.QueryError` if query validation fails. + This function returns: - ## Query values + * `{:ok, result}` - The query was successfully executed. The `result` + could be a list with the matched entries or its count. - There are two types of query values. The ones shared and implemented - by all adapters and the ones that are adapter specific. + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. - ### Common queries + ## Query values - The following query values are shared and/or supported for all adapters: + The query value depends entirely on the adapter. However, Nebulex suggests + the adapters support the following query values: - * `nil` - Returns a list with all cached entries based on the `:return` - option. + * `nil` - Matching all cached entries. 
- ### Adapter-specific queries + * `{:in, keys}` - Matches the entries associated with the set of `keys` + requested. For every key that does not hold a value or does not exist, + it is ignored and not added into the returned list. - The `query` value depends entirely on the adapter implementation; it could - any term. Therefore, it is highly recommended to see adapters' documentation - for more information about building queries. For example, the built-in - `Nebulex.Adapters.Local` adapter uses `:ets.match_spec()` for queries, - as well as other pre-defined ones like `:unexpired` and `:expired`. + It is highly recommended to check the adapters' documentation for more + information about their supported queries. For example, adapter + `Nebulex.Adapters.Local` uses `:ets.match_spec()` queries and + pre-defined ones like `:unexpired` and `:expired`. ## Options * `:return` - Tells the query what to return from the matched entries. - See the possible values in the "Query return option" section below. - The default depends on the adapter, for example, the default for the - built-in adapters is `:key`. This option is supported by the built-in - adapters, but it is recommended to see the adapter's documentation - to confirm its compatibility with this option. + See the ["Return option"](#c:get_all/2-return-option) section down + below for possible values. - See the configured adapter documentation for more runtime options. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. - ## Query return option + ### Return option The following are the possible values for the `:return` option: - * `:key` - Returns a list only with the keys. - * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. - * `{:key, :value}` - Returns a list of tuples in the form `{key, value}`. + * `:keys` - Returns a list only with the keys. + * `:values` - Returns a list only with the values. 
+ * `:entries` - (**Default**) Returns a list of tuples in the form + `{key, value}`. See adapters documentation to confirm what of these options are supported and what other added. - ## Example + ## Examples Populate the cache with some entries: - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + iex> :ok = MyCache.put_all(a: 1, b: 2, c: 3) - Fetch all (with default params): + Fetch all (default options): - iex> MyCache.all() - [1, 2, 3, 4, 5] + iex> MyCache.get_all() + {:ok, [a: 1, b: 2, c: 3]} - Fetch all entries and return values: + Fetch all entries returning only the keys: - iex> MyCache.all(nil, return: :value) - [2, 4, 6, 8, 10] + iex> MyCache.get_all(nil, return: :keys) + {:ok, [:a, :b, :c]} - Fetch all entries and return them as key/value pairs: + Fetch all entries returning only the values: - iex> MyCache.all(nil, return: {:key, :value}) - [{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}] + iex> MyCache.get_all(nil, return: :values) + {:ok, [1, 2, 3]} - Fetch all entries that match with the given query assuming we are using - `Nebulex.Adapters.Local` adapter: + Fetch the requested keys (default options): - iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [:"$1"]}] - iex> MyCache.all(query) - [3, 4, 5] + iex> MyCache.get_all({:in, [:a, :b, :d]}) + {:ok, [a: 1, b: 2]} - ## Query + Fetch the requested keys returning only the keys or values: - Query spec is defined by the adapter, hence, it is recommended to review - adapters documentation. For instance, the built-in `Nebulex.Adapters.Local` - adapter supports `nil | :unexpired | :expired | :ets.match_spec()` as query - value. 
+      iex> MyCache.get_all({:in, [:a, :b, :d]}, return: :keys)
+      {:ok, [:a, :b]}
+      iex> MyCache.get_all({:in, [:a, :b, :d]}, return: :values)
+      {:ok, [1, 2]}
 
-  ## Examples
+  ### Query examples for `Nebulex.Adapters.Local` adapter:
+
+  The `Nebulex.Adapters.Local` adapter supports the following query values
+  (in addition to the suggested ones `nil`, and `{:in, keys}`):
+
+    * `:unexpired` - Matches all non-expired entries.
+    * `:expired` - Matches all expired entries.
+    * `:ets.match_spec()` - ETS match spec.
 
-  Additional built-in queries for `Nebulex.Adapters.Local` adapter:
+  Querying expired and unexpired entries:
 
-      iex> unexpired = MyCache.all(:unexpired)
-      iex> expired = MyCache.all(:expired)
+      iex> {:ok, unexpired} = MyCache.get_all(:unexpired)
+      iex> {:ok, expired} = MyCache.get_all(:expired)
 
-  If we are using Nebulex.Adapters.Local adapter, the stored entry tuple
-  `{:entry, key, value, touched, ttl}`, then the match spec could be
-  something like:
+  For match-spec queries, it is required to understand the adapter's entry
+  structure, which is `{:entry, key, value, touched, ttl}`. Hence, one may
+  write the following query:
 
       iex> spec = [
       ...>   {{:entry, :"$1", :"$2", :_, :_},
-      ...>    [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]}
+      ...>    [{:>, :"$2", 1}],
+      ...>    [{{:"$1", :"$2"}}]}
       ...> ]
-      iex> MyCache.all(spec)
-      [{3, 6}, {4, 8}, {5, 10}]
+      iex> MyCache.get_all(spec)
+      {:ok, [b: 1, c: 3]}
 
   The same previous query but using `Ex2ms`:
 
       iex> import Ex2ms
       Ex2ms
-
       iex> spec =
       ...>   fun do
-      ...>     {_. key, value, _, _} when value > 5 -> {key, value}
+      ...>     {_, key, value, _, _} when value > 1 -> {key, value}
       ...>   end
+      iex> MyCache.get_all(spec)
+      {:ok, [b: 1, c: 3]}
+
+  """
+  @doc group: "Query API"
+  @callback get_all(query(), opts()) :: ok_error_tuple([any()])
 
-      iex> MyCache.all(spec)
-      [{3, 6}, {4, 8}, {5, 10}]
+  @doc """
+  Same as `c:get_all/2`, but the command is executed on the cache instance
+  given at the first argument `dynamic_cache`.
+ + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.get_all(MyCache1, nil, []) """ - @callback all(query :: term, opts) :: [any] + @doc group: "Query API" + @callback get_all(dynamic_cache(), query(), opts()) :: ok_error_tuple([any()]) @doc """ - Similar to `c:all/2` but returns a lazy enumerable that emits all entries + Same as `c:get_all/2` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback get_all!(query(), opts()) :: [any()] + + @doc """ + Same as `c:get_all!/3` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback get_all!(dynamic_cache(), query(), opts()) :: [any()] + + @doc """ + Similar to `c:get_all/2` but returns a lazy enumerable that emits all entries from the cache matching the given `query`. - If `query` is `nil`, then all entries in cache match and are returned - when the stream is evaluated; based on the `:return` option. + This function returns: + + * `{:ok, Enum.t()}` - The query is valid, then the stream is returned. - May raise `Nebulex.QueryError` if query validation fails. + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. ## Query values - See `c:all/2` callback for more information about the query values. + See `c:get_all/2` callback for more information about the query values. ## Options - * `:return` - Tells the query what to return from the matched entries. - See the possible values in the "Query return option" section below. - The default depends on the adapter, for example, the default for the - built-in adapters is `:key`. This option is supported by the built-in - adapters, but it is recommended to see the adapter's documentation - to confirm its compatibility with this option. + * `:return` - Same as `c:get_all/2`. 
* `:page_size` - Positive integer (>= 1) that defines the page size internally used by the adapter for paginating the results coming back from the cache's backend. Defaults to `20`; it's unlikely this will ever need changing. - See the configured adapter documentation for more runtime options. - - ## Query return option - - The following are the possible values for the `:return` option: - - * `:key` - Returns a list only with the keys. - * `:value` - Returns a list only with the values. - * `:entry` - Returns a list of `t:Nebulex.Entry.t/0`. - * `{:key, :value}` - Returns a list of tuples in the form `{key, value}`. - - See adapters documentation to confirm what of these options are supported - and what other added. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples Populate the cache with some entries: - iex> :ok = Enum.each(1..5, &MyCache.put(&1, &1 * 2)) + iex> :ok = MyCache.put_all(a: 1, b: 2, c: 3) Stream all (with default params): - iex> MyCache.stream() |> Enum.to_list() - [1, 2, 3, 4, 5] - - Stream all entries and return values: + iex> {:ok, stream} = MyCache.stream() + iex> Enum.to_list(stream) + [a: 1, b: 2, c: 3] - iex> nil |> MyCache.stream(return: :value, page_size: 3) |> Enum.to_list() - [2, 4, 6, 8, 10] + Stream all entries returning only the keys: - Stream all entries and return them as key/value pairs: + iex> {:ok, stream} = MyCache.stream(nil, return: :keys, page_size: 3) + iex> Enum.to_list(stream) + [:a, :b, :c] - iex> nil |> MyCache.stream(return: {:key, :value}) |> Enum.to_list() - [{1, 2}, {2, 4}, {3, 6}, {4, 8}, {5, 10}] + Stream all entries returning only the values: - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + iex> {:ok, stream} = MyCache.stream(nil, return: :values) + iex> Enum.to_list(stream) + [1, 2, 3] - iex> unexpired_stream = MyCache.stream(:unexpired) - iex> expired_stream = MyCache.stream(:expired) + """ + @doc group: "Query API" + 
@callback stream(query(), opts()) :: ok_error_tuple(Enum.t()) - If we are using Nebulex.Adapters.Local adapter, the stored entry tuple - `{:entry, key, value, touched, ttl}`, then the match spec could be - something like: + @doc """ + Same as `c:stream/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. - iex> spec = [ - ...> {{:entry, :"$1", :"$2", :_, :_}, - ...> [{:>, :"$2", 5}], [{{:"$1", :"$2"}}]} - ...> ] - iex> MyCache.stream(spec, page_size: 100) |> Enum.to_list() - [{3, 6}, {4, 8}, {5, 10}] + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. - The same previous query but using `Ex2ms`: + ## Examples - iex> import Ex2ms + Ex2ms - iex> spec = - ...> fun do - ...> {_, key, value, _, _} when value > 5 -> {key, value} - ...> end + MyCache.stream(MyCache1, nil, []) - iex> spec |> MyCache.stream(page_size: 100) |> Enum.to_list() - [{3, 6}, {4, 8}, {5, 10}] + """ + @doc group: "Query API" + @callback stream(dynamic_cache(), query(), opts()) :: ok_error_tuple(Enum.t()) + @doc """ + Same as `c:stream/2` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback stream!(query(), opts()) :: Enum.t() + @doc """ + Same as `c:stream/3` but raises an exception if an error occurs. """ - @callback stream(query :: term, opts) :: Enum.t() @doc """ Deletes all entries matching the given `query`. If `query` is `nil`, then all entries in the cache are deleted. - It returns the number of deleted entries. + This function returns: - May raise `Nebulex.QueryError` if query validation fails. + * `{:ok, deleted_count}` - The query was successfully executed, + the deleted entries count is returned. - See the configured adapter documentation for runtime options. + * `{:error, reason}` - An error occurred executing the command. 
+ `reason` is the cause of the error. ## Query values - See `c:all/2` callback for more information about the query values. + See `c:get_all/2` callback for more information about the query values. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1494,38 +1993,68 @@ defmodule Nebulex.Cache do Delete all (with default params): iex> MyCache.delete_all() - 5 + {:ok, 5} Delete all entries that match with the given query assuming we are using `Nebulex.Adapters.Local` adapter: iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] - iex> MyCache.delete_all(query) + iex> {:ok, deleted_count} = MyCache.delete_all(query) - > For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms) - to build the match specs much easier. + See `c:get_all/2` for more query examples. + """ + @doc group: "Query API" + @callback delete_all(query(), opts()) :: ok_error_tuple(non_neg_integer()) - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + @doc """ + Same as `c:delete_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples - iex> unexpired = MyCache.delete_all(:unexpired) - iex> expired = MyCache.delete_all(:expired) + MyCache.delete_all(MyCache1, nil, []) """ - @callback delete_all(query :: term, opts) :: integer + @doc group: "Query API" + @callback delete_all(dynamic_cache(), query(), opts()) :: ok_error_tuple(non_neg_integer()) @doc """ - Counts all entries in cache matching the given `query`. + Same as `c:delete_all/2` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback delete_all!(query(), opts()) :: integer() + + @doc """ + Same as `c:delete_all/3` but raises an exception if an error occurs. 
+ """ + @doc group: "Query API" + @callback delete_all!(dynamic_cache(), query(), opts()) :: integer() - It returns the count of the matched entries. + @doc """ + Counts all entries in cache matching the given `query`. If `query` is `nil` (the default), then the total number of cached entries is returned. - May raise `Nebulex.QueryError` if query validation fails. + This function returns: + + * `{:ok, count}` - The query was successfully executed, the `count` of the + matched entries is returned. + + * `{:error, reason}` - An error occurred executing the command. + `reason` is the cause of the error. ## Query values - See `c:all/2` callback for more information about the query values. + See `c:get_all/2` callback for more information about the query values. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Example @@ -1536,48 +2065,72 @@ defmodule Nebulex.Cache do Count all entries in cache: iex> MyCache.count_all() - 5 + {:ok, 5} Count all entries that match with the given query assuming we are using `Nebulex.Adapters.Local` adapter: iex> query = [{{:_, :"$1", :"$2", :_, :_}, [{:>, :"$2", 5}], [true]}] - iex> MyCache.count_all(query) + iex> {:ok, count} = MyCache.count_all(query) + + See `c:get_all/2` for more query examples. + """ + @doc group: "Query API" + @callback count_all(query(), opts()) :: ok_error_tuple(non_neg_integer()) + + @doc """ + Same as `c:count_all/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples - > For the local adapter you can use [Ex2ms](https://github.com/ericmj/ex2ms) - to build the match specs much easier. 
+ MyCache.count_all(MyCache1, nil, []) - Additional built-in queries for `Nebulex.Adapters.Local` adapter: + """ + @doc group: "Query API" + @callback count_all(dynamic_cache(), query(), opts()) :: ok_error_tuple(non_neg_integer()) - iex> unexpired = MyCache.count_all(:unexpired) - iex> expired = MyCache.count_all(:expired) + @doc """ + Same as `c:count_all/2` but raises an exception if an error occurs. + """ + @doc group: "Query API" + @callback count_all!(query(), opts()) :: non_neg_integer() + @doc """ + Same as `c:count_all/3` but raises an exception if an error occurs. """ - @callback count_all(query :: term, opts) :: integer ## Nebulex.Adapter.Persistence - @optional_callbacks dump: 2, load: 2 + @optional_callbacks dump: 2, dump: 3, dump!: 2, dump!: 3, load: 2, load: 3, load!: 2, load!: 3 @doc """ Dumps a cache to the given file `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. ## Options This operation relies entirely on the adapter implementation, which means the options depend on each of them. For that reason, it is recommended to review - the documentation of the adapter to be used. The built-in adapters inherit - the default implementation from `Nebulex.Adapter.Persistence`, hence, review - the available options there. + the documentation of the adapter to be used. Some adapters inherit the default + implementation from `Nebulex.Adapter.Persistence`, hence, review the available + options there. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples Populate the cache with some entries: iex> entries = for x <- 1..10, into: %{}, do: {x, x} - iex> MyCache.set_many(entries) + iex> MyCache.put_all(entries) :ok Dump cache to a file: @@ -1586,27 +2139,58 @@ :ok """ - @callback dump(path :: Path.t(), opts) :: :ok | {:error, term} + @doc group: "Persistence API" + @callback dump(path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:dump/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.dump(MyCache1, "my_cache", []) + + """ + @doc group: "Persistence API" + @callback dump(dynamic_cache(), path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:dump/2` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback dump!(path :: Path.t(), opts()) :: :ok + + @doc """ + Same as `c:dump/3` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback dump!(dynamic_cache(), path :: Path.t(), opts()) :: :ok @doc """ Loads a dumped cache from the given `path`. - Returns `:ok` if successful, or `{:error, reason}` if an error occurs. + Returns `:ok` if successful, `{:error, reason}` otherwise. ## Options Similar to `c:dump/2`, this operation relies entirely on the adapter implementation, therefore, it is recommended to review the documentation - of the adapter to be used. Similarly, the built-in adapters inherit the - default implementation from `Nebulex.Adapter.Persistence`, hence, review - the available options there. + of the adapter to be used. Similarly, some adapters inherit the default + implementation from `Nebulex.Adapter.Persistence`, hence, review the + available options there. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples Populate the cache with some entries: iex> entries = for x <- 1..10, into: %{}, do: {x, x} - iex> MyCache.set_many(entries) + iex> MyCache.put_all(entries) :ok Dump cache to a file: @@ -1620,125 +2204,269 @@ :ok """ - @callback load(path :: Path.t(), opts) :: :ok | {:error, term} + @doc group: "Persistence API" + @callback load(path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:load/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.load(MyCache1, "my_cache", []) + + """ + @doc group: "Persistence API" + @callback load(dynamic_cache(), path :: Path.t(), opts()) :: :ok | error_tuple() + + @doc """ + Same as `c:load/2` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback load!(path :: Path.t(), opts()) :: :ok + + @doc """ + Same as `c:load/3` but raises an exception if an error occurs. + """ + @doc group: "Persistence API" + @callback load!(dynamic_cache(), path :: Path.t(), opts()) :: :ok ## Nebulex.Adapter.Transaction - @optional_callbacks transaction: 2, in_transaction?: 0 + @optional_callbacks transaction: 2, transaction: 3, in_transaction?: 1, in_transaction?: 2 @doc """ Runs the given function inside a transaction. - A successful transaction returns the value returned by the function. + If an Elixir exception occurs, the exception will bubble up from the + transaction function. If the transaction is aborted, + `{:error, reason}` is returned. - See the configured adapter documentation for runtime options. + A successful transaction returns the value returned by the function wrapped + in a tuple as `{:ok, value}`. 
+ + ### Nested transactions + + If `transaction/2` is called inside another transaction, the function is + simply executed without wrapping the new transaction call in any way. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples - MyCache.transaction fn -> + MyCache.transaction(fn -> alice = MyCache.get(:alice) bob = MyCache.get(:bob) MyCache.put(:alice, %{alice | balance: alice.balance + 100}) MyCache.put(:bob, %{bob | balance: bob.balance + 100}) - end + end) Locking only the involved key (recommended): - MyCache.transaction [keys: [:alice, :bob]], fn -> - alice = MyCache.get(:alice) - bob = MyCache.get(:bob) - MyCache.put(:alice, %{alice | balance: alice.balance + 100}) - MyCache.put(:bob, %{bob | balance: bob.balance + 100}) - end + MyCache.transaction( + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.put(:alice, %{alice | balance: alice.balance + 100}) + MyCache.put(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) """ - @callback transaction(opts, function :: fun) :: term + @doc group: "Transaction API" + @callback transaction(fun(), opts()) :: ok_error_tuple(any()) @doc """ - Returns `true` if the current process is inside a transaction. + Same as `c:transaction/2`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Examples - MyCache.in_transaction? 
- #=> false + MyCache.transaction( + MyCache1, + fn -> + alice = MyCache.get(:alice) + bob = MyCache.get(:bob) + MyCache.put(:alice, %{alice | balance: alice.balance + 100}) + MyCache.put(:bob, %{bob | balance: bob.balance + 100}) + end, + [keys: [:alice, :bob]] + ) + + """ + @doc group: "Transaction API" + @callback transaction(dynamic_cache(), fun(), opts()) :: ok_error_tuple(any()) + + @doc """ + Returns `{:ok, true}` if the current process is inside a transaction, + otherwise, `{:ok, false}` is returned. + + If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + MyCache.in_transaction?() + #=> {:ok, false} MyCache.transaction(fn -> - MyCache.in_transaction? #=> true + MyCache.in_transaction? #=> {:ok, true} end) """ - @callback in_transaction?() :: boolean + @doc group: "Transaction API" + @callback in_transaction?(opts()) :: ok_error_tuple(boolean()) + + @doc """ + Same as `c:in_transaction?/1`, but the command is executed on the cache instance + given at the first argument `dynamic_cache`. + + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. + + ## Examples + + MyCache.in_transaction?(MyCache1, []) + + """ + @doc group: "Transaction API" + @callback in_transaction?(dynamic_cache(), opts()) :: ok_error_tuple(boolean()) - ## Nebulex.Adapter.Stats + ## Nebulex.Adapter.Info - @optional_callbacks stats: 0, dispatch_stats: 1 + @optional_callbacks info: 2, info: 3, info!: 2, info!: 3 @doc """ - Returns `Nebulex.Stats.t()` with the current stats values. + Returns `{:ok, info}` where `info` contains the requested cache information, + as specified by the `spec`. - If the stats are disabled for the cache, then `nil` is returned. 
+ If there's an error with executing the command, `{:error, reason}` + is returned, where `reason` is the cause of the error. - ## Example + The `spec` (information specification key) can be: + + * **The atom `:all`**: returns a map with all information items. + * **An atom**: returns the value for the requested information item. + * **A list of atoms**: returns a map only with the requested information + items. + + If the argument `spec` is omitted, all information items are returned; + same as if the `spec` was the atom `:all`. + + The adapters are free to add the information specification keys they want, + therefore, it is highly recommended to review the adapter's documentation + you're using. However, Nebulex suggests the adapters add the following specs: - iex> MyCache.stats() - %Nebulex.Stats{ - measurements: { + * `:server` - General information about the cache server. E.g.: cache name, + adapter, PID, etc. + * `:memory` - Memory consumption related information. E.g.: used memory, + allocated memory, etc. + * `:stats` - Cache statistics. E.g.: hits, misses, etc. + + ## Examples + + The following examples assume the underlying adapter uses the implementation + provided by `Nebulex.Adapters.Common.Info`. 
+ + iex> {:ok, info} = MyCache.info() + iex> info + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + }, + memory: %{ + allocated_memory: 1_000_000, + used_memory: 0 + }, + stats: %{ + deletions: 0, evictions: 0, expirations: 0, hits: 0, misses: 0, updates: 0, writes: 0 + } + } + + iex> {:ok, info} = MyCache.info(:server) + iex> info + %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> + } + + iex> {:ok, info} = MyCache.info([:server, :stats]) + iex> info + %{ + server: %{ + nbx_version: "3.0.0", + cache_module: "MyCache", + cache_adapter: "Nebulex.Adapters.Local", + cache_name: "MyCache", + cache_pid: #PID<0.111.0> }, - metadata: %{} + stats: %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } } """ - @callback stats() :: Nebulex.Stats.t() | nil + @doc group: "Info API" + @callback info(spec :: info_spec(), opts()) :: ok_error_tuple(info_data()) @doc """ - Emits a telemetry event when called with the current stats count. - - The telemetry `:measurements` map will include the same as - `Nebulex.Stats.t()`'s measurements. For example: - - * `:evictions` - Current **evictions** count. - * `:expirations` - Current **expirations** count. - * `:hits` - Current **hits** count. - * `:misses` - Current **misses** count. - * `:updates` - Current **updates** count. - * `:writes` - Current **writes** count. + Same as `c:info/2`, but the command is executed on the cache + instance given at the first argument `dynamic_cache`. - The telemetry `:metadata` map will include the same as `Nebulex.Stats.t()`'s - metadata by default. For example: - - * `:cache` - The cache module, or the name (if an explicit name has been - given to the cache). 
- - Additionally, you can add your own metadata fields by given the option - `:metadata`. - - ## Options - - * `:event_prefix` – The prefix of the telemetry event. - Defaults to `[:nebulex, :cache]`. - - * `:metadata` – A map with additional metadata fields. Defaults to `%{}`. + See the ["Dynamic caches"](#module-dynamic-caches) section at the + module documentation for more information. ## Examples - iex> MyCache.dispatch_stats() - :ok + MyCache.info(MyCache1, :all, []) - iex> MyCache.Stats.dispatch_stats( - ...> event_prefix: [:my_cache], - ...> metadata: %{tag: "tag1"} - ...> ) - :ok + """ + @doc group: "Info API" + @callback info(dynamic_cache(), spec :: info_spec(), opts()) :: ok_error_tuple(info_data()) + + @doc """ + Same as `c:info/2` but raises an exception if an error occurs. + """ + @doc group: "Info API" + @callback info!(spec :: info_spec(), opts()) :: info_data() - **NOTE:** Since `:telemetry` is an optional dependency, when it is not - defined, a default implementation is provided without any logic, just - returning `:ok`. + @doc """ + Same as `c:info/3` but raises an exception if an error occurs. """ - @callback dispatch_stats(opts) :: :ok + @doc group: "Info API" + @callback info!(dynamic_cache(), spec :: info_spec(), opts()) :: info_data() end diff --git a/lib/nebulex/cache/cluster.ex b/lib/nebulex/cache/cluster.ex deleted file mode 100644 index b7833472..00000000 --- a/lib/nebulex/cache/cluster.ex +++ /dev/null @@ -1,102 +0,0 @@ -defmodule Nebulex.Cache.Cluster do - # The module used by cache adapters for - # distributed caching functionality. - @moduledoc false - - @doc """ - Joins the node where the cache `name`'s supervisor process is running to the - `name`'s node group. 
- """ - @spec join(name :: atom) :: :ok - def join(name) do - pid = Process.whereis(name) || self() - - if pid in pg_members(name) do - :ok - else - :ok = pg_join(name, pid) - end - end - - @doc """ - Makes the node where the cache `name`'s supervisor process is running, leave - the `name`'s node group. - """ - @spec leave(name :: atom) :: :ok - def leave(name) do - pg_leave(name, Process.whereis(name) || self()) - end - - @doc """ - Returns the list of nodes joined to given `name`'s node group. - """ - @spec get_nodes(name :: atom) :: [node] - def get_nodes(name) do - name - |> pg_members() - |> Enum.map(&node/1) - |> :lists.usort() - end - - @doc """ - Selects one node based on the computation of the `key` slot. - """ - @spec get_node(name_or_nodes :: atom | [node], Nebulex.Cache.key(), keyslot :: module) :: node - def get_node(name_or_nodes, key, keyslot) - - def get_node(name, key, keyslot) when is_atom(name) do - name - |> get_nodes() - |> get_node(key, keyslot) - end - - def get_node(nodes, key, keyslot) when is_list(nodes) do - Enum.at(nodes, keyslot.hash_slot(key, length(nodes))) - end - - ## PG - - if Code.ensure_loaded?(:pg) do - defp pg_join(name, pid) do - :ok = :pg.join(__MODULE__, name, pid) - end - - defp pg_leave(name, pid) do - _ = :pg.leave(__MODULE__, name, pid) - :ok - end - - defp pg_members(name) do - :pg.get_members(__MODULE__, name) - end - else - # Inline common instructions - @compile {:inline, pg2_namespace: 1} - - defp pg_join(name, pid) do - name - |> ensure_namespace() - |> :pg2.join(pid) - end - - defp pg_leave(name, pid) do - name - |> ensure_namespace() - |> :pg2.leave(pid) - end - - defp pg_members(name) do - name - |> ensure_namespace() - |> :pg2.get_members() - end - - defp ensure_namespace(name) do - namespace = pg2_namespace(name) - :ok = :pg2.create(namespace) - namespace - end - - defp pg2_namespace(name), do: {:nbx, name} - end -end diff --git a/lib/nebulex/cache/entry.ex b/lib/nebulex/cache/entry.ex deleted file mode 100644 
index 187baafc..00000000 --- a/lib/nebulex/cache/entry.ex +++ /dev/null @@ -1,246 +0,0 @@ -defmodule Nebulex.Cache.Entry do - @moduledoc false - - import Nebulex.Helpers - - alias Nebulex.{Adapter, Time} - - # Inline common instructions - @compile {:inline, get_ttl: 1} - - @doc """ - Implementation for `c:Nebulex.Cache.get/2`. - """ - def get(name, key, opts) do - Adapter.with_meta(name, & &1.get(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.get!/2`. - """ - def get!(name, key, opts) do - if result = get(name, key, opts) do - result - else - raise KeyError, key: key, term: name - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.get_all/2`. - """ - def get_all(_name, [], _opts), do: %{} - - def get_all(name, keys, opts) do - Adapter.with_meta(name, & &1.get_all(&2, keys, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put/3`. - """ - def put(name, key, value, opts) do - true = do_put(name, key, value, :put, opts) - :ok - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new/3`. - """ - def put_new(name, key, value, opts) do - do_put(name, key, value, :put_new, opts) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new!/3`. - """ - def put_new!(name, key, value, opts) do - with false <- put_new(name, key, value, opts) do - raise Nebulex.KeyAlreadyExistsError, cache: name, key: key - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.replace/3`. - """ - def replace(name, key, value, opts) do - do_put(name, key, value, :replace, opts) - end - - @doc """ - Implementation for `c:Nebulex.Cache.replace!/3`. 
- """ - def replace!(name, key, value, opts) do - with false <- replace(name, key, value, opts) do - raise KeyError, key: key, term: name - end - end - - defp do_put(_name, _key, nil, _on_write, _opts), do: true - - defp do_put(name, key, value, on_write, opts) do - Adapter.with_meta(name, & &1.put(&2, key, value, get_ttl(opts), on_write, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_all/2`. - """ - def put_all(name, entries, opts) do - _ = do_put_all(name, entries, :put, opts) - :ok - end - - @doc """ - Implementation for `c:Nebulex.Cache.put_new_all/2`. - """ - def put_new_all(name, entries, opts) do - do_put_all(name, entries, :put_new, opts) - end - - def do_put_all(_name, [], _on_write, _opts), do: true - def do_put_all(_name, entries, _on_write, _opts) when map_size(entries) == 0, do: true - - def do_put_all(name, entries, on_write, opts) do - Adapter.with_meta(name, & &1.put_all(&2, entries, get_ttl(opts), on_write, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.delete/2`. - """ - def delete(name, key, opts) do - Adapter.with_meta(name, & &1.delete(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.take/2`. - """ - def take(_name, nil, _opts), do: nil - - def take(name, key, opts) do - Adapter.with_meta(name, & &1.take(&2, key, opts)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.take!/2`. - """ - def take!(name, key, opts) do - if result = take(name, key, opts) do - result - else - raise KeyError, key: key, term: name - end - end - - @doc """ - Implementation for `c:Nebulex.Cache.has_key?/1`. - """ - def has_key?(name, key) do - Adapter.with_meta(name, & &1.has_key?(&2, key)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.get_and_update/3`. 
- """ - def get_and_update(name, key, fun, opts) when is_function(fun, 1) do - Adapter.with_meta(name, fn adapter, adapter_meta -> - current = adapter.get(adapter_meta, key, opts) - - case fun.(current) do - {get, nil} -> - {get, get} - - {get, update} -> - true = adapter.put(adapter_meta, key, update, get_ttl(opts), :put, opts) - {get, update} - - :pop when is_nil(current) -> - {nil, nil} - - :pop -> - :ok = adapter.delete(adapter_meta, key, opts) - {current, nil} - - other -> - raise ArgumentError, - "the given function must return a two-element tuple or :pop," <> - " got: #{inspect(other)}" - end - end) - end - - @doc """ - Implementation for `c:Nebulex.Cache.update/4`. - """ - def update(name, key, initial, fun, opts) do - Adapter.with_meta(name, fn adapter, adapter_meta -> - adapter_meta - |> adapter.get(key, opts) - |> case do - nil -> {initial, nil} - val -> {fun.(val), val} - end - |> case do - {nil, old} -> - # avoid storing nil values - old - - {new, _} -> - true = adapter.put(adapter_meta, key, new, get_ttl(opts), :put, opts) - new - end - end) - end - - @doc """ - Implementation for `c:Nebulex.Cache.incr/3`. - """ - def incr(name, key, amount, opts) when is_integer(amount) do - default = get_option(opts, :default, "an integer", &is_integer/1, 0) - Adapter.with_meta(name, & &1.update_counter(&2, key, amount, get_ttl(opts), default, opts)) - end - - def incr(_cache, _key, amount, _opts) do - raise ArgumentError, "expected amount to be an integer, got: #{inspect(amount)}" - end - - @doc """ - Implementation for `c:Nebulex.Cache.decr/3`. - """ - def decr(name, key, amount, opts) when is_integer(amount) do - incr(name, key, amount * -1, opts) - end - - def decr(_cache, _key, amount, _opts) do - raise ArgumentError, "expected amount to be an integer, got: #{inspect(amount)}" - end - - @doc """ - Implementation for `c:Nebulex.Cache.ttl/1`. 
- """ - def ttl(name, key) do - Adapter.with_meta(name, & &1.ttl(&2, key)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.expire/2`. - """ - def expire(name, key, ttl) do - ttl = - (Time.timeout?(ttl) && ttl) || - raise ArgumentError, "expected ttl to be a valid timeout, got: #{inspect(ttl)}" - - Adapter.with_meta(name, & &1.expire(&2, key, ttl)) - end - - @doc """ - Implementation for `c:Nebulex.Cache.touch/1`. - """ - def touch(name, key) do - Adapter.with_meta(name, & &1.touch(&2, key)) - end - - ## Helpers - - defp get_ttl(opts) do - get_option(opts, :ttl, "a valid timeout", &Time.timeout?/1, :infinity) - end -end diff --git a/lib/nebulex/cache/impl.ex b/lib/nebulex/cache/impl.ex new file mode 100644 index 00000000..869c7c20 --- /dev/null +++ b/lib/nebulex/cache/impl.ex @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.Impl do + @moduledoc false + + @doc """ + Helper macro for defining the functions implementing the Cache API. + """ + defmacro defcacheapi(fun, to: target) do + {name, args} = Macro.decompose_call(fun) + all_args = defcacheapi_all_args(args) + + quote do + @impl true + def unquote(name)(unquote_splicing(args)) do + unquote(name)( + get_dynamic_cache(), + unquote_splicing(all_args) + ) + end + + @impl true + def unquote(name)(dynamic_cache, unquote_splicing(all_args)) do + unquote(target).unquote(name)(dynamic_cache, unquote_splicing(all_args)) + end + end + end + + defp defcacheapi_all_args(args) do + Enum.map(args, fn + {:\\, _, [arg, _]} -> arg + arg -> arg + end) + end +end diff --git a/lib/nebulex/cache/info.ex b/lib/nebulex/cache/info.ex new file mode 100644 index 00000000..fea74dee --- /dev/null +++ b/lib/nebulex/cache/info.ex @@ -0,0 +1,25 @@ +defmodule Nebulex.Cache.Info do + @moduledoc false + + import Nebulex.Adapter, only: [defcommandp: 2] + import Nebulex.Utils, only: [unwrap_or_raise: 1] + + ## API + + @doc """ + Implementation for `c:Nebulex.Cache.info/2`. 
+ """ + def info(name, spec, opts) when is_atom(spec) or is_list(spec) do + do_info(name, spec, opts) + end + + @compile {:inline, do_info: 3} + defcommandp do_info(name, spec, opts), command: :info + + @doc """ + Implementation for `c:Nebulex.Cache.info!/2`. + """ + def info!(name, item, opts) do + unwrap_or_raise info(name, item, opts) + end +end diff --git a/lib/nebulex/cache/kv.ex b/lib/nebulex/cache/kv.ex new file mode 100644 index 00000000..1d15e32a --- /dev/null +++ b/lib/nebulex/cache/kv.ex @@ -0,0 +1,349 @@ +defmodule Nebulex.Cache.KV do + @moduledoc false + + import Nebulex.Adapter + import Nebulex.Utils, only: [unwrap_or_raise: 1] + + alias Nebulex.Cache.Options + alias Nebulex.Time + + @doc """ + Implementation for `c:Nebulex.Cache.fetch/2`. + """ + defcommand fetch(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.fetch!/2`. + """ + def fetch!(name, key, opts) do + unwrap_or_raise fetch(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.get/3`. + """ + def get(name, key, default, opts) do + with_meta(name, &do_get(&1, key, default, opts)) + end + + defp do_get(adapter_meta, key, default, opts) do + with {:error, %Nebulex.KeyError{key: ^key}} <- run_command(adapter_meta, :fetch, [key, opts]) do + {:ok, default} + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.get!/3`. + """ + def get!(name, key, default, opts) do + unwrap_or_raise get(name, key, default, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put/3`. + """ + def put(name, key, value, opts) do + case do_put(name, key, value, :put, opts) do + {:ok, _} -> :ok + {:error, _} = error -> error + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.put!/3`. + """ + def put!(name, key, value, opts) do + _ = unwrap_or_raise do_put(name, key, value, :put, opts) + + :ok + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new/3`. 
+ """ + def put_new(name, key, value, opts) do + do_put(name, key, value, :put_new, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new!/3`. + """ + def put_new!(name, key, value, opts) do + unwrap_or_raise put_new(name, key, value, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.replace/3`. + """ + def replace(name, key, value, opts) do + do_put(name, key, value, :replace, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.replace!/3`. + """ + def replace!(name, key, value, opts) do + unwrap_or_raise replace(name, key, value, opts) + end + + defp do_put(name, key, value, on_write, opts) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + do_put(name, key, value, ttl, on_write, opts) + end + + @compile {:inline, do_put: 6} + defcommandp do_put(name, key, value, ttl, on_write, opts), command: :put + + @doc """ + Implementation for `c:Nebulex.Cache.put_all/2`. + """ + def put_all(name, entries, opts) do + case do_put_all(name, entries, :put, opts) do + {:ok, _} -> :ok + {:error, _} = error -> error + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_all!/2`. + """ + def put_all!(name, entries, opts) do + _ = unwrap_or_raise do_put_all(name, entries, :put, opts) + + :ok + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new_all/2`. + """ + def put_new_all(name, entries, opts) do + do_put_all(name, entries, :put_new, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.put_new_all!/2`. 
+ """ + def put_new_all!(name, entries, opts) do + unwrap_or_raise put_new_all(name, entries, opts) + end + + def do_put_all(_name, [], _on_write, _opts) do + {:ok, true} + end + + def do_put_all(_name, %{} = entries, _on_write, _opts) when map_size(entries) == 0 do + {:ok, true} + end + + def do_put_all(name, entries, on_write, opts) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + do_put_all(name, entries, ttl, on_write, opts) + end + + @compile {:inline, do_put_all: 5} + defcommandp do_put_all(name, entries, ttl, on_write, opts), command: :put_all + + @doc """ + Implementation for `c:Nebulex.Cache.delete/2`. + """ + defcommand delete(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.delete!/2`. + """ + def delete!(name, key, opts) do + unwrap_or_raise delete(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.take/2`. + """ + defcommand take(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.take!/2`. + """ + def take!(name, key, opts) do + case take(name, key, opts) do + {:ok, value} -> value + {:error, reason} -> raise reason + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.has_key?/1`. + """ + defcommand has_key?(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.incr/3`. + """ + def incr(name, key, amount, opts) + + def incr(name, key, amount, opts) when is_integer(amount) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + {default, opts} = Options.pop_and_validate_integer(opts, :default) + + do_incr(name, key, amount, ttl, default, opts) + end + + def incr(_name, _key, amount, _opts) do + raise ArgumentError, + "invalid value for amount argument: expected integer, got: #{inspect(amount)}" + end + + @compile {:inline, do_incr: 6} + defcommandp do_incr(name, key, amount, ttl, default, opts), command: :update_counter + + @doc """ + Implementation for `c:Nebulex.Cache.incr!/3`. 
+ """ + def incr!(name, key, amount, opts) do + unwrap_or_raise incr(name, key, amount, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.decr/3`. + """ + def decr(name, key, amount, opts) + + def decr(name, key, amount, opts) when is_integer(amount) do + incr(name, key, amount * -1, opts) + end + + def decr(_cache, _key, amount, _opts) do + raise ArgumentError, + "invalid value for amount argument: expected integer, got: #{inspect(amount)}" + end + + @doc """ + Implementation for `c:Nebulex.Cache.decr!/3`. + """ + def decr!(name, key, amount, opts) do + unwrap_or_raise decr(name, key, amount, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.ttl/1`. + """ + defcommand ttl(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.ttl!/1`. + """ + def ttl!(name, key, opts) do + case ttl(name, key, opts) do + {:ok, ttl} -> ttl + {:error, reason} -> raise reason + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.expire/2`. + """ + def expire(name, key, ttl, opts) do + ttl = + (Time.timeout?(ttl) && ttl) || + raise ArgumentError, "expected ttl to be a valid timeout, got: #{inspect(ttl)}" + + do_expire(name, key, ttl, opts) + end + + @compile {:inline, do_expire: 4} + defcommandp do_expire(name, key, ttl, opts), command: :expire + + @doc """ + Implementation for `c:Nebulex.Cache.expire!/2`. + """ + def expire!(name, key, ttl, opts) do + unwrap_or_raise expire(name, key, ttl, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.touch/1`. + """ + defcommand touch(name, key, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.touch!/1`. + """ + def touch!(name, key, opts) do + unwrap_or_raise touch(name, key, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.get_and_update/3`. 
+ """ + def get_and_update(name, key, fun, opts) when is_function(fun, 1) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + with_meta(name, fn adapter_meta -> + with {:ok, current} <- do_get(adapter_meta, key, nil, opts) do + {:ok, eval_get_and_update_function(current, adapter_meta, key, ttl, opts, fun)} + end + end) + end + + defp eval_get_and_update_function(current, adapter_meta, key, ttl, opts, fun) do + case fun.(current) do + {get, nil} -> + {get, get} + + {get, update} -> + {:ok, true} = run_command(adapter_meta, :put, [key, update, ttl, :put, opts]) + + {get, update} + + :pop when is_nil(current) -> + {nil, nil} + + :pop -> + :ok = run_command(adapter_meta, :delete, [key, opts]) + + {current, nil} + + other -> + raise ArgumentError, + "the given function must return a two-element tuple or :pop," <> + " got: #{inspect(other)}" + end + end + + @doc """ + Implementation for `c:Nebulex.Cache.get_and_update!/3`. + """ + def get_and_update!(name, key, fun, opts) do + unwrap_or_raise get_and_update(name, key, fun, opts) + end + + @doc """ + Implementation for `c:Nebulex.Cache.update/4`. + """ + def update(name, key, initial, fun, opts) when is_function(fun, 1) do + {ttl, opts} = Options.pop_and_validate_timeout(opts, :ttl) + + with_meta(name, fn adapter_meta -> + value = + case run_command(adapter_meta, :fetch, [key, opts]) do + {:ok, value} -> fun.(value) + {:error, %Nebulex.KeyError{key: ^key}} -> initial + {:error, _} = error -> throw({:return, error}) + end + + with {:ok, true} <- run_command(adapter_meta, :put, [key, value, ttl, :put, opts]) do + {:ok, value} + end + end) + catch + {:return, error} -> error + end + + @doc """ + Implementation for `c:Nebulex.Cache.update!/4`. 
+ """ + def update!(name, key, initial, fun, opts) do + unwrap_or_raise update(name, key, initial, fun, opts) + end +end diff --git a/lib/nebulex/cache/options.ex b/lib/nebulex/cache/options.ex new file mode 100644 index 00000000..e56f76b4 --- /dev/null +++ b/lib/nebulex/cache/options.ex @@ -0,0 +1,280 @@ +defmodule Nebulex.Cache.Options do + @moduledoc false + + alias Nebulex.{Time, Utils} + + # Compilation time option definitions + compile_opts = [ + otp_app: [ + type: :atom, + required: true, + doc: """ + The `:otp_app` should point to an OTP application that has the cache + configuration. + """ + ], + adapter: [ + type: {:custom, __MODULE__, :__validate_behaviour__, [Nebulex.Adapter, "adapter"]}, + type_doc: "`t:module/0`", + required: true, + doc: """ + The cache adapter module. + """ + ], + default_dynamic_cache: [ + type: :atom, + required: false, + doc: """ + The default dynamic cache for executing cache commands. By default, + it is set to the defined cache module. For example, when you call + `MyApp.Cache.start_link/1`, it will start a cache with the name + `MyApp.Cache`. + + See ["Dynamic caches"](#module-dynamic-caches) section + for more information. + """ + ] + ] + + # Start option definitions (runtime) + start_link_opts = [ + name: [ + type: {:custom, __MODULE__, :__validate_name__, []}, + type_doc: "`t:atom/0` | `{:via, reg_mod :: module(), via_name :: any()}`", + required: false, + doc: """ + The name of the Cache supervisor process. By default, it is set to the + defined cache module. When you call `MyApp.Cache.start_link/1`, it will + start a cache with the name `MyApp.Cache`. + """ + ], + telemetry: [ + type: :boolean, + required: false, + default: true, + doc: """ + Whether Telemetry span events for cache commands are dispatched. + """ + ], + telemetry_prefix: [ + type: {:list, :atom}, + required: false, + doc: """ + Nebulex emits cache events using the [Telemetry](`:telemetry`) library. 
+ By default, the telemetry prefix is based on the module name, so if your + module is called `MyApp.Cache`, the prefix will be `[:my_app, :cache]`. + See the ["Telemetry events"](#module-telemetry-events) section to see + which events are emitted by Nebulex out-of-box. + + Note that if you have multiple caches (dynamic caches), you should keep + the `:telemetry_prefix` consistent for each cache and use either the + `:cache` or the `:name` property in the event metadata for distinguishing + between caches. + """ + ] + ] + + # Shared option definitions (runtime) + runtime_shared_opts = [ + telemetry_event: [ + type: {:list, :atom}, + required: false, + doc: """ + The telemetry event name to dispatch the event under. Defaults to what + is configured in the `:telemetry_prefix` option. See the + ["Telemetry events"](#module-telemetry-events) section + for more information. + """ + ], + telemetry_metadata: [ + type: {:map, :any, :any}, + required: false, + default: %{}, + doc: """ + Extra metadata to add to the Telemetry events on cache commands. + These end up in the `:extra_metadata` metadata key of these events. + See the ["Telemetry events"](#module-telemetry-events) section + for more information. + """ + ] + ] + + # Runtime common option definitions for write operations + runtime_common_write_opts = [ + ttl: [ + type: :timeout, + required: false, + default: :infinity, + doc: """ + The key's time-to-live (or expiry time) in **milliseconds**. + """ + ] + ] + + # Runtime option definitions for updating counter + update_counter_opts = [ + default: [ + type: :integer, + required: false, + default: 0, + doc: """ + If the key is not present in Cache, the default value is inserted as the + initial value of the key before it is incremented. 
+ """ + ] + ] + + # Compilation time options schema + @compile_opts_schema NimbleOptions.new!(compile_opts) + + # Start options schema + @start_link_opts_schema NimbleOptions.new!(start_link_opts) + + # Shared options schema + @runtime_shared_opts_schema NimbleOptions.new!(runtime_shared_opts) + + # Runtime common write operations schema + @runtime_common_write_opts_schema NimbleOptions.new!(runtime_common_write_opts) + + # Update counter options schema + @update_counter_opts_schema NimbleOptions.new!(runtime_common_write_opts ++ update_counter_opts) + + ## Docs API + + @spec compile_options_docs() :: binary() + def compile_options_docs do + NimbleOptions.docs(@compile_opts_schema) + end + + @spec start_link_options_docs() :: binary() + def start_link_options_docs do + NimbleOptions.docs(@start_link_opts_schema) + end + + @spec runtime_shared_options_docs() :: binary() + def runtime_shared_options_docs do + NimbleOptions.docs(@runtime_shared_opts_schema) + end + + @spec runtime_common_write_options_docs() :: binary() + def runtime_common_write_options_docs do + NimbleOptions.docs(@runtime_common_write_opts_schema) + end + + @spec update_counter_options_docs() :: binary() + def update_counter_options_docs do + NimbleOptions.docs(@update_counter_opts_schema) + end + + ## Validation API + + @spec validate_compile_opts!(keyword()) :: keyword() + def validate_compile_opts!(opts) do + validate!(opts, @compile_opts_schema) + end + + @spec validate_start_opts!(keyword()) :: keyword() + def validate_start_opts!(opts) do + start_link_opts = + opts + |> Keyword.take(Keyword.keys(@start_link_opts_schema.schema)) + |> validate!(@start_link_opts_schema) + + Keyword.merge(opts, start_link_opts) + end + + @spec validate_runtime_shared_opts!(keyword()) :: keyword() + def validate_runtime_shared_opts!(opts) do + validate!(opts, @runtime_shared_opts_schema) + end + + @spec validate!(keyword(), NimbleOptions.t()) :: keyword() + def validate!(opts, schema) do + opts + |> 
NimbleOptions.validate(schema) + |> format_error() + end + + defp format_error({:ok, opts}) do + opts + end + + defp format_error({:error, %NimbleOptions.ValidationError{message: message}}) do + raise ArgumentError, message + end + + @doc false + def __validate_name__(name) + + def __validate_name__(name) when is_atom(name) do + {:ok, name} + end + + def __validate_name__({:via, _reg_mod, _reg_name}) do + {:ok, nil} + end + + @doc false + def __validate_behaviour__(value, behaviour, msg \\ "module") + + def __validate_behaviour__(value, behaviour, msg) when is_atom(value) do + with {:module, module} <- Code.ensure_compiled(value), + behaviours = Utils.module_behaviours(module), + true <- behaviour in behaviours do + {:ok, module} + else + {:error, _} -> + msg = + "#{msg} #{inspect(value)} was not compiled, " <> + "ensure it is correct and it is included as a project dependency" + + {:error, msg} + + false -> + msg = + "expected the #{msg} module given to Nebulex.Cache " <> + "to list #{inspect(behaviour)} as a behaviour" + + {:error, msg} + end + end + + def __validate_behaviour__(value, _behaviour, _msg) do + {:error, "expected a module, got: #{inspect(value)}"} + end + + ## Extras + + @spec pop_and_validate_timeout(keyword(), any()) :: {timeout(), keyword()} + def pop_and_validate_timeout(opts, key) do + case Keyword.pop(opts, key) do + {nil, opts} -> + {:infinity, opts} + + {ttl, opts} -> + if not Time.timeout?(ttl) do + raise ArgumentError, + "invalid value for #{inspect(key)} option: expected " <> + "non-negative integer or :infinity, got: #{inspect(ttl)}" + end + + {ttl, opts} + end + end + + @spec pop_and_validate_integer(keyword(), any()) :: {integer(), keyword()} + def pop_and_validate_integer(opts, key) do + case Keyword.pop(opts, key) do + {nil, opts} -> + {0, opts} + + {val, opts} when is_integer(val) -> + {val, opts} + + {val, _opts} -> + raise ArgumentError, + "invalid value for #{inspect(key)} option: expected integer, " <> + "got: #{inspect(val)}" + 
end + end +end diff --git a/lib/nebulex/cache/persistence.ex b/lib/nebulex/cache/persistence.ex index 133cf988..3b6dd543 100644 --- a/lib/nebulex/cache/persistence.ex +++ b/lib/nebulex/cache/persistence.ex @@ -1,19 +1,30 @@ defmodule Nebulex.Cache.Persistence do @moduledoc false - alias Nebulex.Adapter + import Nebulex.Adapter, only: [defcommand: 1] + import Nebulex.Utils, only: [unwrap_or_raise: 1] @doc """ Implementation for `c:Nebulex.Cache.dump/2`. """ - def dump(name, path, opts) do - Adapter.with_meta(name, & &1.dump(&2, path, opts)) + defcommand dump(name, path, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.dump!/2`. + """ + def dump!(name, path, opts) do + unwrap_or_raise dump(name, path, opts) end @doc """ Implementation for `c:Nebulex.Cache.load/2`. """ - def load(name, path, opts) do - Adapter.with_meta(name, & &1.load(&2, path, opts)) + defcommand load(name, path, opts) + + @doc """ + Implementation for `c:Nebulex.Cache.load!/2`. + """ + def load!(name, path, opts) do + unwrap_or_raise load(name, path, opts) end end diff --git a/lib/nebulex/cache/queryable.ex b/lib/nebulex/cache/queryable.ex index 154a9ecc..cbeb7f80 100644 --- a/lib/nebulex/cache/queryable.ex +++ b/lib/nebulex/cache/queryable.ex @@ -1,29 +1,45 @@ defmodule Nebulex.Cache.Queryable do @moduledoc false - alias Nebulex.Adapter + import Nebulex.Adapter, only: [defcommand: 2, defcommandp: 2] + import Nebulex.Utils, only: [unwrap_or_raise: 1] @default_page_size 20 @doc """ - Implementation for `c:Nebulex.Cache.all/2`. + Implementation for `c:Nebulex.Cache.get_all/2`. """ - def all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :all, query, opts)) + defcommand get_all(name, query, opts), command: :execute, largs: [:get_all] + + @doc """ + Implementation for `c:Nebulex.Cache.get_all!/2`. + """ + def get_all!(name, query, opts) do + unwrap_or_raise get_all(name, query, opts) end @doc """ Implementation for `c:Nebulex.Cache.count_all/2`. 
""" - def count_all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :count_all, query, opts)) + defcommand count_all(name, query, opts), command: :execute, largs: [:count_all] + + @doc """ + Implementation for `c:Nebulex.Cache.count_all!/2`. + """ + def count_all!(name, query, opts) do + unwrap_or_raise count_all(name, query, opts) end @doc """ Implementation for `c:Nebulex.Cache.delete_all/2`. """ - def delete_all(name, query, opts) do - Adapter.with_meta(name, & &1.execute(&2, :delete_all, query, opts)) + defcommand delete_all(name, query, opts), command: :execute, largs: [:delete_all] + + @doc """ + Implementation for `c:Nebulex.Cache.delete_all!/2`. + """ + def delete_all!(name, query, opts) do + unwrap_or_raise delete_all(name, query, opts) end @doc """ @@ -31,6 +47,17 @@ defmodule Nebulex.Cache.Queryable do """ def stream(name, query, opts) do opts = Keyword.put_new(opts, :page_size, @default_page_size) - Adapter.with_meta(name, & &1.stream(&2, query, opts)) + + do_stream(name, query, opts) + end + + @compile {:inline, do_stream: 3} + defcommandp do_stream(name, query, opts), command: :stream + + @doc """ + Implementation for `c:Nebulex.Cache.stream!/2`. 
+ """ + def stream!(name, query, opts) do + unwrap_or_raise stream(name, query, opts) end end diff --git a/lib/nebulex/cache/registry.ex b/lib/nebulex/cache/registry.ex index 32a7869a..e80a394a 100644 --- a/lib/nebulex/cache/registry.ex +++ b/lib/nebulex/cache/registry.ex @@ -3,49 +3,76 @@ defmodule Nebulex.Cache.Registry do use GenServer + import Nebulex.Utils, only: [wrap_error: 2] + ## API - @spec start_link(Keyword.t()) :: GenServer.on_start() + @spec start_link(keyword) :: GenServer.on_start() def start_link(_opts) do GenServer.start_link(__MODULE__, :ok, name: __MODULE__) end - @spec register(pid, term) :: :ok - def register(pid, value) when is_pid(pid) do - GenServer.call(__MODULE__, {:register, pid, value}) + @spec register(pid, atom, term) :: :ok + def register(pid, name, value) when is_pid(pid) and is_atom(name) do + GenServer.call(__MODULE__, {:register, pid, name, value}) end - @spec lookup(atom | pid) :: term + @spec lookup(atom | pid) :: {:ok, term} | {:error, Nebulex.Error.t()} + def lookup(name_or_pid) + def lookup(name) when is_atom(name) do - name - |> GenServer.whereis() - |> Kernel.||(raise Nebulex.RegistryLookupError, name: name) - |> lookup() + if pid = GenServer.whereis(name) do + lookup(pid) + else + wrap_error Nebulex.Error, reason: :registry_lookup_error, cache: name + end end def lookup(pid) when is_pid(pid) do - {_ref, value} = :persistent_term.get({__MODULE__, pid}) - value + case :persistent_term.get({__MODULE__, pid}, nil) do + {_ref, _name, value} -> + {:ok, value} + + nil -> + wrap_error Nebulex.Error, reason: :registry_lookup_error, cache: pid + end + end + + @spec all_running() :: [atom | pid] + def all_running do + for {{__MODULE__, pid}, {_ref, name, _value}} <- :persistent_term.get() do + name || pid + end end ## GenServer Callbacks @impl true def init(:ok) do - {:ok, :ok} + {:ok, nil} end @impl true - def handle_call({:register, pid, value}, _from, state) do + def handle_call({:register, pid, name, value}, _from, state) do + # 
Monitor the process so that when it is down it can be removed ref = Process.monitor(pid) - :ok = :persistent_term.put({__MODULE__, pid}, {ref, value}) + + # Store the process data + :ok = :persistent_term.put({__MODULE__, pid}, {ref, name, value}) + + # Reply with success {:reply, :ok, state} end @impl true def handle_info({:DOWN, ref, _type, pid, _reason}, state) do - {^ref, _} = :persistent_term.get({__MODULE__, pid}) + # Check the process reference + {^ref, _, _} = :persistent_term.get({__MODULE__, pid}) + + # Remove the process data _ = :persistent_term.erase({__MODULE__, pid}) + + # Continue {:noreply, state} end end diff --git a/lib/nebulex/cache/stats.ex b/lib/nebulex/cache/stats.ex deleted file mode 100644 index 1a2b9611..00000000 --- a/lib/nebulex/cache/stats.ex +++ /dev/null @@ -1,39 +0,0 @@ -defmodule Nebulex.Cache.Stats do - @moduledoc false - - alias Nebulex.Adapter - - ## API - - @doc """ - Implementation for `c:Nebulex.Cache.stats/0`. - """ - def stats(name) do - Adapter.with_meta(name, & &1.stats(&2)) - end - - if Code.ensure_loaded?(:telemetry) do - @doc """ - Implementation for `c:Nebulex.Cache.dispatch_stats/1`. - """ - def dispatch_stats(name, opts \\ []) do - Adapter.with_meta(name, fn adapter, meta -> - with true <- is_list(meta.telemetry_prefix), - %Nebulex.Stats{} = info <- adapter.stats(meta) do - :telemetry.execute( - meta.telemetry_prefix ++ [:stats], - info.measurements, - Map.merge(info.metadata, opts[:metadata] || %{}) - ) - else - _ -> :ok - end - end) - end - else - @doc """ - Implementation for `c:Nebulex.Cache.dispatch_stats/1`. 
- """ - def dispatch_stats(_name, _opts \\ []), do: :ok - end -end diff --git a/lib/nebulex/cache/supervisor.ex b/lib/nebulex/cache/supervisor.ex index 352729ec..d5ebefbd 100644 --- a/lib/nebulex/cache/supervisor.ex +++ b/lib/nebulex/cache/supervisor.ex @@ -1,22 +1,28 @@ defmodule Nebulex.Cache.Supervisor do @moduledoc false + use Supervisor - import Nebulex.Helpers + import Nebulex.Cache.Options + import Nebulex.Utils alias Nebulex.Telemetry @doc """ Starts the cache manager supervisor. """ + @spec start_link(module(), atom(), module(), keyword()) :: Supervisor.on_start() def start_link(cache, otp_app, adapter, opts) do - sup_opts = if name = Keyword.get(opts, :name, cache), do: [name: name], else: [] - Supervisor.start_link(__MODULE__, {cache, otp_app, adapter, opts}, sup_opts) + name = Keyword.get(opts, :name, cache) + sup_opts = if name, do: [name: name], else: [] + + Supervisor.start_link(__MODULE__, {name, cache, otp_app, adapter, opts}, sup_opts) end @doc """ Retrieves the runtime configuration. """ + @spec runtime_config(module(), atom(), keyword()) :: {:ok, keyword()} | :ignore def runtime_config(cache, otp_app, opts) do config = otp_app @@ -24,7 +30,7 @@ defmodule Nebulex.Cache.Supervisor do |> Keyword.merge(opts) |> Keyword.put(:otp_app, otp_app) |> Keyword.put_new_lazy(:telemetry_prefix, fn -> telemetry_prefix(cache) end) - |> Keyword.update(:telemetry, true, &(is_boolean(&1) && &1)) + |> validate_start_opts!() cache_init(cache, config) end @@ -40,54 +46,71 @@ defmodule Nebulex.Cache.Supervisor do @doc """ Retrieves the compile time configuration. 
""" + @spec compile_config(keyword()) :: {atom(), module(), [module()], keyword()} def compile_config(opts) do - otp_app = opts[:otp_app] || raise ArgumentError, "expected otp_app: to be given as argument" - adapter = opts[:adapter] || raise ArgumentError, "expected adapter: to be given as argument" - - behaviours = module_behaviours(adapter, "adapter") + # Validate options + opts = validate_compile_opts!(opts) - unless Nebulex.Adapter in behaviours do - raise ArgumentError, - "expected :adapter option given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" - end + otp_app = Keyword.fetch!(opts, :otp_app) + adapter = Keyword.fetch!(opts, :adapter) + behaviours = module_behaviours(adapter) - {otp_app, adapter, behaviours} + {otp_app, adapter, behaviours, opts} end ## Supervisor Callbacks @impl true - def init({cache, otp_app, adapter, opts}) do + def init({name, cache, otp_app, adapter, opts}) do + # Normalize name to atom, ignore via/global names + name = if is_atom(name), do: name, else: nil + case runtime_config(cache, otp_app, opts) do {:ok, opts} -> - Telemetry.execute( - [:nebulex, :cache, :init], - %{system_time: System.system_time()}, - %{cache: cache, opts: opts} - ) - + # Dispatch Telemetry event notifying the cache is started + :ok = + Telemetry.execute( + [:nebulex, :cache, :init], + %{system_time: System.system_time()}, + %{name: name, cache: cache, opts: opts} + ) + + # Init the adapter {:ok, child, meta} = adapter.init([cache: cache] ++ opts) - meta = Map.put(meta, :cache, cache) - child_spec = wrap_child_spec(child, [adapter, meta]) + + # Add required keys to the metadata + meta = + Map.merge(meta, %{ + name: name, + cache: cache, + adapter: adapter, + telemetry: Keyword.fetch!(opts, :telemetry), + telemetry_prefix: Keyword.fetch!(opts, :telemetry_prefix) + }) + + # Build child spec + child_spec = wrap_child_spec(child, [name, meta]) + + # Init the cache supervisor Supervisor.init([child_spec], strategy: :one_for_one, max_restarts: 0) - other 
-> - other + :ignore -> + :ignore end end ## Helpers @doc false - def start_child({mod, fun, args}, adapter, meta) do - case apply(mod, fun, args) do - {:ok, pid} -> - meta = Map.put(meta, :pid, pid) - :ok = Nebulex.Cache.Registry.register(self(), {adapter, meta}) - {:ok, pid} - - other -> - other + def start_child({mod, fun, args}, name, meta) do + with {:ok, pid} <- apply(mod, fun, args) do + # Add the PID to the metadata + meta = Map.put(meta, :pid, pid) + + # Register the started cache's pid + :ok = Nebulex.Cache.Registry.register(self(), name, meta) + + {:ok, pid} end end diff --git a/lib/nebulex/cache/transaction.ex b/lib/nebulex/cache/transaction.ex index c23fbd7d..863c95ee 100644 --- a/lib/nebulex/cache/transaction.ex +++ b/lib/nebulex/cache/transaction.ex @@ -1,19 +1,20 @@ defmodule Nebulex.Cache.Transaction do @moduledoc false - alias Nebulex.Adapter + import Nebulex.Adapter, only: [defcommand: 1, defcommandp: 2] @doc """ Implementation for `c:Nebulex.Cache.transaction/2`. """ - def transaction(name, fun, opts) do - Adapter.with_meta(name, & &1.transaction(&2, fun, opts)) + def transaction(name, fun, opts) when is_function(fun, 0) do + do_transaction(name, fun, opts) end + @compile {:inline, do_transaction: 3} + defcommandp do_transaction(name, fun, opts), command: :transaction + @doc """ - Implementation for `c:Nebulex.Cache.in_transaction?/0`. + Implementation for `c:Nebulex.Cache.in_transaction?/1`. """ - def in_transaction?(name) do - Adapter.with_meta(name, & &1.in_transaction?(&2)) - end + defcommand in_transaction?(name, opts) end diff --git a/lib/nebulex/caching.ex b/lib/nebulex/caching.ex index bd8aa3ca..c2d04f1c 100644 --- a/lib/nebulex/caching.ex +++ b/lib/nebulex/caching.ex @@ -15,30 +15,86 @@ if Code.ensure_loaded?(Decorator.Define) do is applied transparently without any interference to the invoker. See **`Nebulex.Caching.Decorators`** for more information about - **"Declarative annotation-based caching"**. 
+ **"Declarative decorator-based caching"**. + + ## Options + + The following are the available compilation time options when defining + the caching usage via `use Nebulex.Caching`: + + #{Nebulex.Caching.Options.caching_options_docs()} + + These options are applied to all decorated functions in a module, but they + can be overridden on each decorator declaration. They act as a global or + default configuration for the decorators. For example, if the cache is the + same for all decorated functions in a module, one can configure it globally + like so: `use Nebulex.Caching, cache: MyCache`. Therefore, the `:cache` + option is not required on the decorator declarations. """ + alias Nebulex.Caching.{Decorators, Options} + @doc false - defmacro __using__(_opts) do - quote do + defmacro __using__(opts \\ []) do + quote bind_quoted: [opts: opts] do + # Validate options + opts = + opts + |> Macro.escape() + |> Options.validate_caching_opts!() + + # Set the __using__ macro options so they can be used in the decorators + :ok = Module.put_attribute(__MODULE__, :__caching_opts__, opts) + use Nebulex.Caching.Decorators + import Nebulex.Caching end end - alias Nebulex.Caching.Decorators + @doc """ + A wrapper macro for `Nebulex.Caching.Decorators.dynamic_cache_spec/2`. + + Defines a dynamic cache to use in the decorated function. + + The first argument `cache` specifies the defined cache module, + and the second argument `name` is the actual name of the cache. + + > **NOTE:** This macro is automatically imported and then available + > when using `use Nebulex.Caching`. + + ## Example + + defmodule MyApp.Users do + use Nebulex.Caching + + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :users)) + def get_user(id) do + # your logic ... + end + end + + See the **"`:cache` option"** section at the `Nebulex.Caching.Decorators` + module documentation for more information. 
+ """ + defmacro dynamic_cache(cache, name) do + quote do + Decorators.dynamic_cache_spec(unquote(cache), unquote(name)) + end + end @doc """ - A wrapper macro for `Nebulex.Caching.Decorators.build_keyref/2`. + A wrapper macro for `Nebulex.Caching.Decorators.keyref_spec/2`. - This macro is imported automatically with `use Nebulex.Caching`, - which means you don't need to do any additional `alias` or `import`. + See `Nebulex.Caching.Decorators.cacheable/3` decorator + for more information about its usage. - See `cacheable/3` decorator for more information about its usage. + > **NOTE:** This macro is automatically imported and then available + > when using `use Nebulex.Caching`. """ defmacro keyref(cache \\ nil, key) do quote do - Decorators.build_keyref(unquote(cache), unquote(key)) + Decorators.keyref_spec(unquote(cache), unquote(key)) end end end diff --git a/lib/nebulex/caching/decorators.ex b/lib/nebulex/caching/decorators.ex index 5c55716f..7374d62d 100644 --- a/lib/nebulex/caching/decorators.ex +++ b/lib/nebulex/caching/decorators.ex @@ -1,15 +1,18 @@ if Code.ensure_loaded?(Decorator.Define) do defmodule Nebulex.Caching.Decorators do @moduledoc """ - Declarative annotation-based caching via function - [decorators](https://github.com/arjan/decorator). + Declarative decorator-based caching, inspired by + [Spring Cache Abstraction][spring-cache]. + + > *[`decorator`][decorator-lib] library is used underneath.* + + [spring-cache]: https://docs.spring.io/spring/docs/3.2.x/spring-framework-reference/html/cache.html + [decorator-lib]: https://github.com/arjan/decorator For caching declaration, the abstraction provides three Elixir function decorators: `cacheable `, `cache_evict`, and `cache_put`, which allow functions to trigger cache population or cache eviction. - Let us take a closer look at each annotation. - - > Inspired by [Spring Cache Abstraction](https://docs.spring.io/spring/docs/3.2.x/spring-framework-reference/html/cache.html). 
+ Let us take a closer look at each decorator. ## `cacheable` decorator @@ -17,12 +20,13 @@ if Code.ensure_loaded?(Decorator.Define) do cacheable - that is, functions for whom the result is stored into the cache so, on subsequent invocations (with the same arguments), the value in the cache is returned without having to actually execute the function. In its - simplest form, the decorator/annotation declaration requires the name of - the cache associated with the annotated function: + simplest form, the decorator declaration requires the cache associated with + the decorated function if the [default cache](#module-default-cache) is not + configured (see ["Cache configuration"](#module-cache-configuration)): @decorate cacheable(cache: Cache) - def get_account(id) do - # the logic for retrieving the account ... + def find_book(isbn) do + # the logic for retrieving the book ... end In the snippet above, the function `get_account/1` is associated with the @@ -30,301 +34,280 @@ if Code.ensure_loaded?(Decorator.Define) do to see whether the invocation has been already executed and does not have to be repeated. - ### Default Key Generation + See `cacheable/3` for more information. - Since caches are essentially key-value stores, each invocation of a cached - function needs to be translated into a suitable key for cache access. - Out of the box, the caching abstraction uses a simple key-generator - based on the following algorithm: + ## `cache_put` decorator - * If no params are given, return `0`. - * If only one param is given, return that param as key. - * If more than one param is given, return a key computed from the hashes - of all parameters (`:erlang.phash2(args)`). + For cases where the cache needs to be updated without interfering with the + function execution, one can use the `cache_put` decorator. That is, the + function will always be executed and its result placed into the cache + (according to the `cache_put` options). 
It supports the same options as + `cacheable` and should be used for cache population or update rather than + function flow optimization. - > **IMPORTANT:** Since Nebulex v2.1.0, the default key generation implements - the algorithm described above, breaking backward compatibility with older - versions. Therefore, you may need to change your code in case of using the - default key generation. + @decorate cache_put(cache: Cache) + def update_book(isbn) do + # the logic for retrieving the book and then updating it ... + end - The default key generator is provided by the cache via the callback - `c:Nebulex.Cache.__default_key_generator__/0` and it is applied only - if the option `key:` or `keys:` is not configured. Defaults to - `Nebulex.Caching.SimpleKeyGenerator`. You can change the default - key generator at compile time with the option `:default_key_generator`. - For example, one can define a cache with a default key generator as: + Note that using `cache_put` and `cacheable` decorators on the same function + is generally discouraged because they have different behaviors. While the + latter causes the function execution to be skipped by using the cache, the + former forces the execution in order to execute a cache update. This leads + to unexpected behavior and with the exception of specific corner-cases + (such as decorators having conditions that exclude them from each other), + such declarations should be avoided. - defmodule MyApp.Cache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local, - default_key_generator: __MODULE__ + See `cache_put/3` for more information. - @behaviour Nebulex.Caching.KeyGenerator + ## `cache_evict` decorator - @impl true - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) + The cache abstraction allows not just the population of a cache store but + also eviction. This process is useful for removing stale or unused data from + the cache. 
Opposed to `cacheable`, the decorator `cache_evict` demarcates + functions that perform cache eviction, which are functions that act as + triggers for removing data from the cache. Just like its sibling, + `cache_evict` requires specifying the cache that will be affected by the + action, allows to provide a key or a list of keys to be evicted, but in + addition, features an extra option `:all_entries` which indicates whether + a cache-wide eviction needs to be performed rather than just one or a few + entries (based on `:key` or `:keys` option): + + @decorate cache_evict(cache: Cache, all_entries: true) + def load_books(file_stream) do + # the logic for loading books ... end - The key generator module must implement the `Nebulex.Caching.KeyGenerator` - behaviour. + The option `:all_entries` comes in handy when an entire cache region needs + to be cleared out - rather than evicting each entry (which would take a + long time since it is inefficient), all the entries are removed in one + operation as shown above. - > **IMPORTANT:** There are some caveats to keep in mind when using - the key generator, therefore, it is highly recommended to review - `Nebulex.Caching.KeyGenerator` behaviour documentation before. + One can also indicate whether the eviction should occur after (the default) + or before the function executes through the `:before_invocation` attribute. + The former provides the same semantics as the rest of the decorators; once + the method completes successfully, an action (in this case, eviction) on the + cache is executed. If the function does not execute (as it might be cached) + or an exception is raised, the eviction does not occur. The latter + (`before_invocation: true`) causes the eviction to occur always before the + method is invoked. This is useful in cases where the eviction does not need + to be tied to the function execution outcome. 
- Also, you can provide a different key generator at any time - (overriding the default one) when using any caching annotation - through the option `:key_generator`. For example: + See `cache_evict/3` for more information. - # With a module implementing the key-generator behaviour - @decorate cache_put(cache: Cache, key_generator: CustomKeyGenerator) - def update_account(account) do - # the logic for updating the given entity ... - end + ## Shared Options - # With the shorthand tuple {module, args} - @decorate cache_put( - cache: Cache, - key_generator: {CustomKeyGenerator, [account.name]} - ) - def update_account2(account) do - # the logic for updating the given entity ... - end + All three cache decorators explained previously accept the following + options: - # With a MFA tuple - @decorate cache_put( - cache: Cache, - key_generator: {AnotherModule, :genkey, [account.id]} - ) - def update_account3(account) do - # the logic for updating the given entity ... - end + #{Nebulex.Caching.Options.shared_options_docs()} - > The `:key_generator` option is available for all caching annotations. + ## Cache configuration - ### Custom Key Generation Declaration + As it's shown in the options above, the cache can be configured in the + decorator declaration with the option `:cache`. However, there are three + possible values, such as documented in the `t:cache/0` type. Let's go over + these cache value alternatives in detail. - Since caching is generic, it is quite likely the target functions have - various signatures that cannot be simply mapped on top of the cache - structure. This tends to become obvious when the target function has - multiple arguments out of which only some are suitable for caching - (while the rest are used only by the function logic). For example: + ### Cache module - @decorate cacheable(cache: Cache) - def get_account(email, include_users?) do - # the logic for retrieving the account ... 
+ The first cache value option is an existing cache module; this is the most + common value. For example: + + @decorate cacheable(cache: MyApp.Cache) + def find_book(isbn) do + # the logic for retrieving the book ... end - At first glance, while the boolean argument influences the way the account - is found, it is no use for the cache. + ### Dynamic cache - For such cases, the `cacheable` decorator allows the user to specify the - key explicitly based on the function attributes. + In case one is using a dynamic cache: - @decorate cacheable(cache: Cache, key: {Account, email}) - def get_account(email, include_users?) do - # the logic for retrieving the account ... + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :books)) + def find_book(isbn) do + # the logic for retrieving the book ... end - @decorate cacheable(cache: Cache, key: {Account, user.account_id}) - def get_user_account(%User{} = user) do - # the logic for retrieving the account ... - end + > See ["Dynamic caches"][dynamic-caches] for more information. - It is also possible passing options to the cache, like so: + [dynamic-caches]: https://hexdocs.pm/nebulex/Nebulex.Cache.html#module-dynamic-caches - @decorate cacheable( - cache: Cache, - key: {Account, email}, - opts: [ttl: 300_000] - ) - def get_account(email, include_users?) do - # the logic for retrieving the account ... - end + ### Anonymous function - See the **"Shared Options"** section below. + Finally, it is also possible to configure an anonymous function to resolve + the cache value in runtime. The function receives the + [decorator context](`t:context/0`) as an argument and must return either + a cache module or a dynamic cache. - ### Functions with multiple clauses - - Since [decorator lib](https://github.com/arjan/decorator#functions-with-multiple-clauses) - is used, it is important to be aware of its recommendations, warns, - limitations, and so on. 
In this case, for functions with multiple clauses - the general advice is to create an empty function head, and call the - decorator on that head, like so: + @decorate cacheable(cache: &MyApp.Resolver.resolve_cache/1) + def find_book(isbn) do + # the logic for retrieving the book ... + end - @decorate cacheable(cache: Cache, key: email) - def get_account(email \\\\ nil) + Where `resolve_cache` function may look like this: - def get_account(nil), do: nil + defmodule MyApp.Resolver do + alias Nebulex.Caching.Decorators.Context - def get_account(email) do - # the logic for retrieving the account ... + def resolve_cache(%Context{} = context) do + # the logic for generating the cache value + end end - ## `cache_put` decorator + ## Default cache - For cases where the cache needs to be updated without interfering with the - function execution, one can use the `cache_put` decorator. That is, the - method will always be executed and its result placed into the cache - (according to the `cache_put` options). It supports the same options as - `cacheable`. + While option `:cache` is handy for specifying the cache to be used by the + decorated function, it may be a bit cumbersome when there is a module with + several decorated functions, and all use the same cache. Because in that + case, we will have to set the `:cache` option with the same value in all + the decorated functions. Fortunately, the `:cache` option can be configured + globally for all decorated functions in a module when defining the caching + usage via `use Nebulex.Caching`. For example: - @decorate cache_put(cache: Cache, key: {Account, acct.email}) - def update_account(%Account{} = acct, attrs) do - # the logic for updating the account ... - end + defmodule MyApp.Books do + use Nebulex.Caching, cache: MyApp.Cache - Note that using `cache_put` and `cacheable` annotations on the same function - is generally discouraged because they have different behaviors. 
While the - latter causes the method execution to be skipped by using the cache, the - former forces the execution in order to execute a cache update. This leads - to unexpected behavior and with the exception of specific corner-cases - (such as decorators having conditions that exclude them from each other), - such declarations should be avoided. + @decorate cacheable() + def get_book(isbn) do + # the logic for retrieving a book ... + end - ## `cache_evict` decorator + @decorate cacheable(cache: MyApp.BestSellersCache) + def best_sellers do + # the logic for retrieving best seller books ... + end - The cache abstraction allows not just the population of a cache store but - also eviction. This process is useful for removing stale or unused data from - the cache. Opposed to `cacheable`, the decorator `cache_evict` demarcates - functions that perform cache eviction, which are functions that act as - triggers for removing data from the cache. The `cache_evict` decorator not - only allows a key to be specified, but also a set of keys. Besides, extra - options like`all_entries` which indicates whether a cache-wide eviction - needs to be performed rather than just an entry one (based on the key or - keys): - - @decorate cache_evict(cache: Cache, key: {Account, email}) - def delete_account_by_email(email) do - # the logic for deleting the account ... + ... end - @decorate cacheable( - cache: Cache, - keys: [{Account, acct.id}, {Account, acct.email}] - ) - def delete_account(%Account{} = acct) do - # the logic for deleting the account ... - end + In the snippet above, the function `get_book/1` is associated with the + cache `MyApp.Cache` by default since option `:cache` is not provided in + the decorator. In other words, when the `:cache` option is configured + globally (when defining the caching usage via `use Nebulex.Caching`), + it is not required in the decorator declaration. 
However, one can always + override the global or default cache in the decorator declaration by + providing the option `:cache`, as is shown in the `best_sellers/0` + function, which is associated with a different cache. - @decorate cacheable(cache: Cache, all_entries: true) - def delete_all_accounts do - # the logic for deleting all the accounts ... - end + To conclude, it is crucial to know the decorator must be associated with + a cache, either a global or default cache defined at the caching usage + definition (e.g.: `use Nebulex.Caching, cache: MyCache`), or a specific + one configured in the decorator itself. - The option `all_entries:` comes in handy when an entire cache region needs - to be cleared out - rather than evicting each entry (which would take a - long time since it is inefficient), all the entries are removed in one - operation as shown above. + ## Key Generation - ## Shared Options + Since caches are essentially key-value stores, each invocation of a cached + function needs to be translated into a suitable key for cache access. The + key can be generated using a default key generator (which is configurable) + or through decorator options `:key` or `:keys`. Let us take a closer look + at each approach: - All three cache annotations explained previously accept the following - options: + ### Default Key Generation - * `:cache` - Defines what cache to use (required). Raises `ArgumentError` - if the option is not present. It can be also a MFA tuple to resolve the - cache dynamically in runtime by calling it. See "The :cache option" - section below for more information. + Out of the box, the caching abstraction uses a simple key generator + strategy given by `Nebulex.Caching.SimpleKeyGenerator`, which is + based on the following algorithm: - * `:key` - Defines the cache access key (optional). It overrides the - `:key_generator` option. If this option is not present, a default - key is generated by the configured or default key generator. 
+    * If no arguments are given, return `0`.
+    * If only one argument is given, return that argument as the key.
+    * If more than one argument is given, return a key computed
+      from the hash of all arguments (`:erlang.phash2(args)`).

-    * `:opts` - Defines the cache options that will be passed as argument
-      to the invoked cache function (optional).
+  To provide a different default key generator, one needs to implement the
+  `Nebulex.Caching.KeyGenerator` behaviour. Once configured via the
+  `:default_key_generator` option, the generator will be used for each
+  declaration that does not specify its own key generation strategy (see the
+  ["Custom Key Generation"](#module-custom-key-generation-declaration)
+  section down below).

-    * `:match` - Match function `t:match_fun/0`. This function is for matching
-      and deciding whether the code-block evaluation result (which is received
-      as an argument) is cached or not. The function should return:
+  The following example shows how to configure a custom default key generator:

-      * `true` - the code-block evaluation result is cached as it is
-        (the default).
-      * `{true, value}` - `value` is cached. This is useful to set what
-        exactly must be cached.
-      * `{true, value, opts}` - `value` is cached with the options given by
-        `opts`. This return allows us to set the value to be cached, as well
-        as the runtime options for storing it (e.g.: the `ttl`).
-      * `false` - Nothing is cached.
+      defmodule MyApp.Books do
+        use Nebulex.Caching,
+          cache: MyApp.Cache,
+          default_key_generator: MyApp.Keygen

-      The default match function looks like this:

+        ...
+      end

-      ```elixir
-      fn
-        {:error, _} -> false
-        :error -> false
-        nil -> false
-        _ -> true
+      defmodule MyApp.Keygen do
+        @behaviour Nebulex.Caching.KeyGenerator
+
+        @impl true
+        def generate(context) do
+          # your key generation logic ... 
+ end end - ``` - - By default, if the code-block evaluation returns any of the following - terms/values `nil`, `:error`, `{:error, term}`, the default match - function returns `false` (the returned result is not cached), - otherwise, `true` is returned (the returned result is cached). - - * `:key_generator` - The custom key-generator to be used (optional). - If present, this option overrides the default key generator provided - by the cache, and it is applied only if the option `key:` or `keys:` - is not present. In other words, the option `key:` or `keys:` overrides - the `:key_generator` option. See "The `:key_generator` option" section - below for more information about the possible values. - - * `:on_error` - It may be one of `:raise` (the default) or `:nothing`. - The decorators/annotations call the cache under the hood, hence, - by default, any error or exception at executing a cache command - is propagated. When this option is set to `:nothing`, any error - or exception executing a cache command is ignored and the annotated - function is executed normally. - - ### The `:cache` option - - The cache option can be the de defined cache module or an MFA tuple to - resolve the cache dynamically in runtime. When it is an MFA tuple, the - MFA is invoked passing the calling module, function name, and arguments - by default, and the MFA arguments are passed as extra arguments. - For example: - - @decorate cacheable(cache: {MyApp.Cache, :cache, []}, key: var) - def some_function(var) do - # Some logic ... + + ### Custom Key Generation Declaration + + Since caching is generic, it is quite likely the target functions have + various signatures that cannot be simply mapped on top of the cache + structure. This tends to become obvious when the target function has + multiple arguments out of which only some are suitable for caching + (while the rest are used only by the function logic). 
For example: + + @decorate cacheable(cache: Cache) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... end - The annotated function above will call `MyApp.Cache.cache(mod, fun, args)` - to resolve the cache in runtime, where `mod` is the calling module, `fun` - the calling function name, and `args` the calling arguments. + At first glance, while the two `boolean` arguments influence the way the + book is found, they are not used for the cache. Furthermore, what if only + one of the two is important while the other is not? + + For such cases, the `cacheable` decorator allows the user to specify how + the key is generated through its `:key` attribute (the same applies to all + decorators). The developer can pick the arguments of interest (or their + nested properties), perform operations or even invoke arbitrary functions + without having to write any code or implement any interface. This is the + recommended approach over the default generator since functions tend to be + quite different in signatures as the code base grows; while the default + strategy might work for some functions, it rarely does for all functions. + + The following are some examples of generating keys: + + @decorate cacheable(cache: Cache, key: isbn) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - Also, we can define the function passing some extra arguments, like so: + @decorate cacheable(cache: Cache, key: isbn.raw_number) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - @decorate cacheable(cache: {MyApp.Cache, :cache, ["extra"]}, key: var) - def some_function(var) do - # Some logic ... + @decorate cacheable(cache: Cache, key: &{&1.function_name, hd(&1.args)}) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... 
end - In this case, the MFA will be invoked by adding the extra arguments, like: - `MyApp.Cache.cache(mod, fun, args, "extra")`. + In the last example, an anonymous function is used for generating the key. + It must be an anonymous function that expects the + [decorator's context](`t:context/0`) as an argument. - ### The `:key_generator` option + One can also provide options for the cache commands executed underneath, + like so: - The possible values for the `:key_generator` are: + @decorate cacheable(cache: Cache, key: isbn, opts: [ttl: 300_000]) + def find_book(isbn, check_warehouse?, include_used?) do + # the logic for retrieving the book ... + end - * A module implementing the `Nebulex.Caching.KeyGenerator` behaviour. + In that case, `opts: [ttl: 300_000]` specifies the TTL for the cached value. - * A MFA tuple `{module, function, args}` for a function to call to - generate the key before the cache is invoked. A shorthand value of - `{module, args}` is equivalent to - `{module, :generate, [calling_module, calling_function_name, args]}`. + See the ["Shared Options"](#module-shared-options) section + for more information. 
-  ## Putting all together
+  ## Examples

-  Supposing we are using `Ecto` and we want to define some cacheable functions
-  within the context `MyApp.Accounts`:
+  Supposing an app uses Ecto, and there is a context for accessing books
+  `MyApp.Books`, we may decorate some functions as follows:

-      # The config
+      # The cache config
       config :my_app, MyApp.Cache,
         gc_interval: 86_400_000, #=> 1 day
-        backend: :shards
+        max_size: 1_000_000 #=> Max 1M books

       # The Cache
       defmodule MyApp.Cache do
@@ -333,154 +316,327 @@ if Code.ensure_loaded?(Decorator.Define) do
         adapter: Nebulex.Adapters.Local
       end

-      # Some Ecto schema
-      defmodule MyApp.Accounts.User do
+      # Book schema
+      defmodule MyApp.Books.Book do
         use Ecto.Schema

-        schema "users" do
-          field(:username, :string)
-          field(:password, :string)
-          field(:role, :string)
+        schema "books" do
+          field(:isbn, :string)
+          field(:title, :string)
+          field(:author, :string)
+          # The rest of the fields omitted
         end

-        def changeset(user, attrs) do
-          user
-          |> cast(attrs, [:username, :password, :role])
-          |> validate_required([:username, :password, :role])
+        def changeset(book, attrs) do
+          book
+          |> cast(attrs, [:isbn, :title, :author])
+          |> validate_required([:isbn, :title, :author])
         end
       end

-      # Accounts context
-      defmodule MyApp.Accounts do
-        use Nebulex.Caching
-
-        alias MyApp.Accounts.User
-        alias MyApp.{Cache, Repo}
+      # Books context
+      defmodule MyApp.Books do
+        use Nebulex.Caching, cache: MyApp.Cache

-        @ttl :timer.hours(1)
+        alias MyApp.Repo
+        alias MyApp.Books.Book

-        @decorate cacheable(cache: Cache, key: {User, id}, opts: [ttl: @ttl])
-        def get_user!(id) do
-          Repo.get!(User, id)
+        @decorate cacheable(key: id)
+        def get_book(id) do
+          Repo.get(Book, id)
         end

-        @decorate cacheable(
-                    cache: Cache,
-                    key: {User, username},
-                    opts: [ttl: @ttl]
-                  )
-        def get_user_by_username(username) do
-          Repo.get_by(User, [username: username])
+        @decorate cacheable(key: isbn)
+        def get_book_by_isbn(isbn) do
+          Repo.get_by(Book, [isbn: isbn])
         end

-        @decorate cache_put(
- cache: Cache, - keys: [{User, usr.id}, {User, usr.username}], - match: &match_update/1 - ) - def update_user(%User{} = usr, attrs) do - usr - |> User.changeset(attrs) + @decorate cache_put(keys: [book.id, book.isbn], match: &match_fun/1) + def update_book(%Book{} = book, attrs) do + book + |> Book.changeset(attrs) |> Repo.update() end - defp match_update({:ok, usr}), do: {true, usr} - defp match_update({:error, _}), do: false + defp match_fun({:ok, usr}), do: {true, usr} + defp match_fun({:error, _}), do: false - @decorate cache_evict( - cache: Cache, - keys: [{User, usr.id}, {User, usr.username}] - ) - def delete_user(%User{} = usr) do - Repo.delete(usr) + @decorate cache_evict(keys: [book.id, book.isbn]) + def delete_book(%Book{} = book) do + Repo.delete(book) end - def create_user(attrs \\\\ %{}) do - %User{} - |> User.changeset(attrs) + def create_book(attrs \\\\ %{}) do + %Book{} + |> Book.changeset(attrs) |> Repo.insert() end end - See [Cache Usage Patters Guide](http://hexdocs.pm/nebulex/cache-usage-patterns.html). + ## Functions with multiple clauses + + Since [`decorator`](https://github.com/arjan/decorator#functions-with-multiple-clauses) + library is used, it is important to be aware of its recommendations, + caveats, limitations, and so on. For instance, for functions with multiple + clauses the general advice is to create an empty function head, and call + the decorator on that head, like so: + + @decorate cacheable(cache: Cache) + def get_user(id \\\\ nil) + + def get_user(nil), do: nil + + def get_user(id) do + # your logic ... + end + + However, the previous example works because we are not using the function + attributes for defining a custom key via the `:key` option. If we add + `key: id` for instance, we will get errors and/or warnings, since the + decorator is expecting the attribute `id` to be present, but it is not + in the first function clause. 
In other words, when we take this approach,
+  it is like the decorator was applied to all function clauses separately.
+  To overcome this issue, the arguments used in the decorator must be
+  present in the function clauses, which could be achieved in different
+  ways. A simple way would be to decorate a wrapper function with the
+  arguments the decorator uses and do the pattern-matching in a separate
+  function.
+
+      @decorate cacheable(cache: Cache, key: id)
+      def get_user(id \\\\ nil) do
+        do_get_user(id)
+      end
+
+      defp do_get_user(nil), do: nil
+
+      defp do_get_user(id) do
+        # your logic ...
+      end
+
+  Alternatively, you could decorate only the function clause needing the
+  caching.
+
+      def get_user(nil), do: nil
+
+      @decorate cacheable(cache: Cache, key: id)
+      def get_user(id) do
+        # your logic ...
+      end
+
+  ## Further readings
+
+    * [Cache Usage Patterns Guide](http://hexdocs.pm/nebulex/cache-usage-patterns.html).
+
   """

-  use Decorator.Define, cacheable: 1, cache_evict: 1, cache_put: 1
+  defmodule Context do
+    @moduledoc """
+    Decorator context.
+    """
+
+    @typedoc """
+    Decorator context type.
+
+    The decorator context defines the following keys:
+
+      * `:module` - The invoked module.
+      * `:function_name` - The invoked function name.
+      * `:arity` - The arity of the invoked function.
+      * `:args` - The arguments that are given to the invoked function.
+
+    ## Caveats about the `:args`
+
+    The following are some caveats about the context's `:args`
+    to keep in mind:
+
+      * Only arguments explicitly assigned to a variable will be included.
+      * Ignored or underscored arguments will be ignored.
+      * Pattern-matching expressions without a variable assignment will be
+        ignored. Therefore, if there is a pattern-matching and you want to
+        include its value, it has to be explicitly assigned to a variable.
+ + For example, suppose you have a module with a decorated function: + + defmodule MyApp.SomeModule do + use Nebulex.Caching - import Nebulex.Helpers + alias MyApp.Cache + + @decorate cacheable(cache: Cache, key: &my_key_generator/1) + def get_something(x, _y, _, {_, _}, [_, _], %{a: a}, %{} = z) do + # Function's logic + end + + def my_key_generator(context) do + # Key generation logic + end + end + + The generator will be invoked like so: + + my_key_generator(%Nebulex.Caching.Decorators.Context{ + module: MyApp.SomeModule, + function_name: :get_something, + arity: 7, + args: [x, z] + }) + + As you may notice, only the arguments `x` and `z` are included in the + context args when calling the `my_key_generator/1` function. + """ + @type t() :: %__MODULE__{ + module: module(), + function_name: atom(), + arity: non_neg_integer(), + args: [any()] + } + + # Context struct + defstruct module: nil, function_name: nil, arity: 0, args: [] + end + + use Decorator.Define, + cacheable: 0, + cacheable: 1, + cache_evict: 0, + cache_evict: 1, + cache_put: 0, + cache_put: 1 + + import Nebulex.Utils, only: [get_option: 5] import Record - ## Types + ## Records + + # Dynamic cache spec + defrecordp(:dynamic_cache, :"$nbx_dynamic_cache_spec", cache: nil, name: nil) # Key reference spec - defrecordp(:keyref, :"$nbx_cache_keyref", cache: nil, key: nil) + defrecordp(:keyref, :"$nbx_keyref_spec", cache: nil, key: nil) + + ## Types + + @typedoc "Proxy type to the decorator context" + @type context() :: Context.t() - @typedoc "Type spec for a key reference" - @type keyref :: record(:keyref, cache: Nebulex.Cache.t(), key: any) + @typedoc "Type spec for a dynamic cache definition" + @type dynamic_cache() :: record(:dynamic_cache, cache: module(), name: atom() | pid()) + + @typedoc "The type for the cache value" + @type cache_value() :: module() | dynamic_cache() + + @typedoc """ + The type for the `:cache` option value. 
+ + When defining the `:cache` option on the decorated function, + the value can be: + + * The defined cache module. + * A dynamic cache spec built with the macro + [`dynamic_cache/2`](`Nebulex.Caching.dynamic_cache/2`). + * An anonymous function to call to resolve the cache value in runtime. + The function receives the decorator context as an argument and must + return either a cache module or a dynamic cache. + + """ + @type cache() :: cache_value() | (context() -> cache_value()) + + @typedoc """ + The type for the `:key` option value. + + When defining the `:key` option on the decorated function, + the value can be: + + * An anonymous function to call to generate the key in runtime. + The function receives the decorator context as an argument + and must return the key for caching. + * Any term. + + """ + @type key() :: (context() -> any()) | any() - @typedoc "Type for :on_error option" - @type on_error_opt :: :raise | :nothing + @typedoc "Type for on_error action" + @type on_error() :: :nothing | :raise - @typedoc "Match function type" - @type match_fun :: (any -> boolean | {true, any} | {true, any, Keyword.t()}) + @typedoc "Type for the match function return" + @type match_return() :: boolean() | {true, any()} | {true, any(), keyword()} - @typedoc "Type spec for the option :references" - @type references :: (any -> any) | nil | any + @typedoc "Type for match function" + @type match() :: + (result :: any() -> match_return()) + | (result :: any(), context() -> match_return()) - ## API + @typedoc "Type for a key reference spec" + @type keyref_spec() :: record(:keyref, cache: Nebulex.Cache.t(), key: any()) + + @typedoc "Type for a key reference" + @type keyref() :: keyref_spec() | any() + + @typedoc """ + Type spec for the option `:references`. + + When defining the `:references` option on the decorated function, + the value can be: + + * A special spec/tuple given by `t:keyref/0`, which can be built using + the macro [`keyref/2`](`Nebulex.Caching.keyref/2`). 
+ * An anonymous function that expects the result of the function's code + block evaluation as an argument. Optionally the decorator context can + be received as a second argument. It must return the referenced key, + which could be `t:keyref/0` or any term. + * `nil` means there are no key references (ignored). + * Any term. + + See `cacheable/3` decorator for more information. + """ + @type references() :: + nil + | keyref() + | (result :: any() -> keyref() | any()) + | (result :: any(), context() -> keyref() | any()) + + ## Decorator API @doc """ - Provides a way of annotating functions to be cached (cacheable aspect). + Decorator indicating that the result of invoking a function can be cached. + + Each time a decorated function is invoked, `cacheable` behavior will be + applied, checking whether the function has been already invoked for the + given arguments. A default algorithm uses the function arguments to compute + the key, but a custom key can be provided via the `:key` attribute or a + custom key-generator implementation can replace the default one + (see ["Key Generation"](#module-key-generation) section at the module + documentation). - The returned value by the code block is cached if it doesn't exist already - in cache, otherwise, it is returned directly from cache and the code block - is not executed. + If no value is found in the cache for the computed key, the target function + will be invoked, and the returned value will be stored in the associated + cache. Note that what is cached can be handled with the `:match` option. ## Options - * `:references` - (Optional) (`t:references/0`) Indicates the key given - by the option `:key` references another key given by the option - `:references`. In other words, when it is present, this option tells - the `cacheable` decorator to store the function's block result under - the referenced key given by the option `:references`, and the referenced - key under the key given by the option `:key`. 
The value could be: - - * `nil` - (Default) It is ignored (no key references). - * `(term -> keyref | term)` - An anonymous function receiving the - result of the function's code block evaluation and must return the - referenced key. There is also a special type of return in case you - want to reference a key located in an external/different cache than - the one defined with the options `:key` or `:key_generator`. In this - scenario, you must return a special type `t:keyref/0`, which can be - build with the macro [`keyref/2`](`Nebulex.Caching.keyref/2`). - See the "External referenced keys" section below. - * `any` - It could be an explicit term or value, for example, a fixed - value or a function argument. - - See the "Referenced keys" section for more information. - - See the "Shared options" section at the module documentation. + #{Nebulex.Caching.Options.cacheable_options_docs()} + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache @ttl :timer.hours(1) - @decorate cacheable(cache: Cache, key: id, opts: [ttl: @ttl]) + @decorate cacheable(key: id, opts: [ttl: @ttl]) def get_by_id(id) do # your logic (maybe the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache, key: email, references: & &1.id) + @decorate cacheable(key: email, references: & &1.id) def get_by_email(email) do # your logic (maybe the loader to retrieve the value from the SoR) end - @decorate cacheable(cache: Cache, key: clauses, match: &match_fun/1) + @decorate cacheable(key: clauses, match: &match_fun/1) def all(clauses) do # your logic (maybe the loader to retrieve the value from the SoR) end @@ -502,29 +658,24 @@ if Code.ensure_loaded?(Decorator.Define) do those fields, like so: defmodule MyApp.UserAccounts do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cacheable(cache: Cache, key: id) + @decorate cacheable(key: id) def get_user_account(id) do # your logic ... end - @decorate cacheable(cache: Cache, key: email) + @decorate cacheable(key: email) def get_user_account_by_email(email) do # your logic ... end - @decorate cacheable(cache: Cache, key: token) + @decorate cacheable(key: token) def get_user_account_by_token(token) do # your logic ... end - @decorate cache_evict( - cache: Cache, - keys: [user.id, user.email, user.token] - ) + @decorate cache_evict(keys: [user.id, user.email, user.token]) def update_user_account(user) do # your logic ... end @@ -541,26 +692,24 @@ if Code.ensure_loaded?(Decorator.Define) do way. The module will look like this: defmodule MyApp.UserAccounts do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cacheable(cache: Cache, key: id) + @decorate cacheable(key: id) def get_user_account(id) do # your logic ... 
end - @decorate cacheable(cache: Cache, key: email, references: & &1.id) + @decorate cacheable(key: email, references: & &1.id) def get_user_account_by_email(email) do # your logic ... end - @decorate cacheable(cache: Cache, key: token, references: & &1.id) + @decorate cacheable(key: token, references: & &1.id) def get_user_account_by_token(token) do # your logic ... end - @decorate cache_evict(cache: Cache, key: user.id) + @decorate cache_evict(key: user.id) def update_user_account(user) do # your logic ... end @@ -641,57 +790,45 @@ if Code.ensure_loaded?(Decorator.Define) do `RedisCache`; underneath, the macro [`keyref/2`](`Nebulex.Caching.keyref/2`) builds the special return type for the external cache reference. """ - def cacheable(attrs, block, context) do + @doc group: "Decorator API" + def cacheable(attrs \\ [], block, context) do caching_action(:cacheable, attrs, block, context) end @doc """ - Provides a way of annotating functions to be evicted; but updating the - cached key instead of deleting it. - - The content of the cache is updated without interfering with the function - execution. That is, the method would always be executed and the result - cached. + Decorator indicating that a function triggers a + [cache put](`c:Nebulex.Cache.put/3`) operation. - The difference between `cacheable/3` and `cache_put/3` is that `cacheable/3` - will skip running the function if the key exists in the cache, whereas - `cache_put/3` will actually run the function and then put the result in - the cache. + In contrast to the `cacheable` decorator, this decorator does not cause the + decorated function to be skipped. Instead, it always causes the function + to be invoked and its result to be stored in the associated cache if the + condition given by the `:match` option matches accordingly. ## Options - * `:keys` - The set of cached keys to be updated with the returned value - on function completion. It overrides `:key` and `:key_generator` - options. 
+ #{Nebulex.Caching.Options.cache_put_options_docs()} - See the "Shared options" section at the module documentation. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache @ttl :timer.hours(1) - @decorate cache_put(cache: Cache, key: id, opts: [ttl: @ttl]) + @decorate cache_put(key: id, opts: [ttl: @ttl]) def update!(id, attrs \\\\ %{}) do # your logic (maybe write data to the SoR) end - @decorate cache_put( - cache: Cache, - key: id, - match: &match_fun/1, - opts: [ttl: @ttl] - ) + @decorate cache_put(key: id, match: &match_fun/1, opts: [ttl: @ttl]) def update(id, attrs \\\\ %{}) do # your logic (maybe write data to the SoR) end @decorate cache_put( - cache: Cache, keys: [object.name, object.id], match: &match_fun/1, opts: [ttl: @ttl] @@ -708,54 +845,38 @@ if Code.ensure_loaded?(Decorator.Define) do provides the logic to write data to the system-of-record (SoR) and the rest is provided by the decorator under-the-hood. """ - def cache_put(attrs, block, context) do + @doc group: "Decorator API" + def cache_put(attrs \\ [], block, context) do caching_action(:cache_put, attrs, block, context) end @doc """ - Provides a way of annotating functions to be evicted (eviction aspect). - - On function's completion, the given key or keys (depends on the `:key` and - `:keys` options) are deleted from the cache. + Decorator indicating that a function triggers a cache evict operation + (`delete` or `delete_all`). ## Options - * `:keys` - Defines the set of keys to be evicted from cache on function - completion. It overrides `:key` and `:key_generator` options. - - * `:all_entries` - Defines if all entries must be removed on function - completion. Defaults to `false`. 
+ #{Nebulex.Caching.Options.cache_evict_options_docs()} - * `:before_invocation` - Boolean to indicate whether the eviction should - occur after (the default) or before the function executes. The former - provides the same semantics as the rest of the annotations; once the - function completes successfully, an action (in this case eviction) - on the cache is executed. If the function does not execute (as it might - be cached) or an exception is raised, the eviction does not occur. - The latter (`before_invocation: true`) causes the eviction to occur - always, before the function is invoked; this is useful in cases where - the eviction does not need to be tied to the function outcome. - - See the "Shared options" section at the module documentation. + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. ## Examples defmodule MyApp.Example do - use Nebulex.Caching - - alias MyApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cache_evict(cache: Cache, key: id) + @decorate cache_evict(key: id) def delete(id) do # your logic (maybe write/delete data to the SoR) end - @decorate cache_evict(cache: Cache, keys: [object.name, object.id]) + @decorate cache_evict(keys: [object.name, object.id]) def delete_object(object) do # your logic (maybe write/delete data to the SoR) end - @decorate cache_evict(cache: Cache, all_entries: true) + @decorate cache_evict(all_entries: true) def delete_all do # your logic (maybe write/delete data to the SoR) end @@ -767,152 +888,178 @@ if Code.ensure_loaded?(Decorator.Define) do decorator, when the data is written to the SoR, the key for that value is deleted from cache instead of updated. 
""" - def cache_evict(attrs, block, context) do + @doc group: "Decorator API" + def cache_evict(attrs \\ [], block, context) do caching_action(:cache_evict, attrs, block, context) end + ## Decorator helpers + @doc """ - A convenience function for building a cache key reference when using the - `cacheable` decorator. If you want to build an external reference, which is, - referencing a `key` stored in an external cache, you have to provide the - `cache` where the `key` is located to. The `cache` argument is optional, - and by default is `nil`, which means, the referenced `key` is in the same - cache provided via `:key` or `:key_generator` options (internal reference). + A convenience function for defining a dynamic cache. - **NOTE:** In case you need to build a reference, consider using the macro - `Nebulex.Caching.keyref/2` instead. + The first argument `cache` specifies the defined cache module, + and the second argument `name` is the actual name of the cache. - See `cacheable/3` decorator for more information about external references. + **NOTE:** In case you need to define a dynamic cache, consider using + the macro `Nebulex.Caching.dynamic_cache/2` instead. - ## Examples + ## Example + + defmodule MyApp.Books do + use Nebulex.Caching - iex> Nebulex.Caching.Decorators.build_keyref("my-key") - {:"$nbx_cache_keyref", nil, "my-key"} - iex> Nebulex.Caching.Decorators.build_keyref(MyCache, "my-key") - {:"$nbx_cache_keyref", MyCache, "my-key"} + @decorate cacheable(cache: dynamic_cache(MyApp.Cache, :books)) + def find_book(isbn) do + # your logic ... + end + end + + """ + @doc group: "Decorator Helpers" + @spec dynamic_cache_spec(module(), atom() | pid()) :: dynamic_cache() + def dynamic_cache_spec(cache, name) do + dynamic_cache(cache: cache, name: name) + end + + @doc """ + A convenience function for building a reference to a cached key when + using the `cacheable` decorator. 
If `cache` is `nil` (the default), + the referenced key is looked up in the same cache provided via the + `:cache` option. When the key is located in another cache, the argument + `cache` can be set to the right/desired value. + + **NOTE:** In case you need to build a reference, consider using the macro + `Nebulex.Caching.keyref/2` instead. + See the ["Referenced keys"](#cacheable/3-referenced-keys) section + at the `cacheable` decorator for more information. """ - @spec build_keyref(Nebulex.Cache.t(), term) :: keyref() - def build_keyref(cache \\ nil, key) do + @doc group: "Decorator Helpers" + @spec keyref_spec(cache() | nil, any()) :: keyref_spec() + def keyref_spec(cache \\ nil, key) do keyref(cache: cache, key: key) end - ## Private Functions + ## Private functions for decorators defp caching_action(action, attrs, block, context) do - cache = attrs[:cache] || raise ArgumentError, "expected cache: to be given as argument" - opts_var = attrs[:opts] || [] - on_error_var = on_error_opt(attrs) - match_var = attrs[:match] || default_match_fun() + # Get options defined via the __using__ macro + caching_opts = Module.get_attribute(context.module, :__caching_opts__, []) - args = - context.args - |> Enum.reduce([], &walk/2) - |> Enum.reverse() + # Resolve the cache to use + cache_var = get_cache(attrs, caching_opts) + + # Get the options to be given to the cache commands + opts_var = attrs[:opts] || [] - cache_block = cache_block(cache, args, context) - keygen_block = keygen_block(attrs, args, context) - action_block = action_block(action, block, attrs, keygen_block) + # Build decorator context + context = decorator_context(context) + + # Build key generation block + keygen_block = keygen_block(attrs, caching_opts) + + # Build the action block + action_block = + action_block( + action, + block, + attrs, + keygen_block, + on_error_opt(attrs, Keyword.fetch!(caching_opts, :on_error)), + attrs[:match] || default_match_fun() + ) quote do - cache = unquote(cache_block) + # 
Set common vars + cache = unquote(cache_var) opts = unquote(opts_var) - match = unquote(match_var) - on_error = unquote(on_error_var) - unquote(action_block) + # Set the decorator context + :ok = unquote(__MODULE__).put_decorator_context(unquote(context)) + + try do + # Execute the decorated function's code block + unquote(action_block) + after + # Reset decorator context + :ok = unquote(__MODULE__).del_decorator_context() + end end end + defp get_cache(attrs, caching_opts) do + attrs[:cache] + |> Kernel.||(caching_opts[:cache]) + |> Kernel.||( + raise ArgumentError, + "expected :cache option to be found within the decorator options " <> + "if it is not configured globally in the caching definition " <> + "(e.g.: `use Nebulex.Caching, cache: MyCache`)" + ) + end + defp default_match_fun do quote do fn {:error, _} -> false :error -> false - nil -> false _ -> true end end end - defp walk({:\\, _, [ast, _]}, acc) do - walk(ast, acc) + defp decorator_context(context) do + # Sanitize context args + args = + context.args + |> Enum.reduce([], &sanitize_arg/2) + |> Enum.reverse() + + quote do + var!(ctx_args, __MODULE__) = unquote(args) + + %Context{ + module: unquote(context.module), + function_name: unquote(context.name), + arity: unquote(context.arity), + args: var!(ctx_args, __MODULE__) + } + end end - defp walk({:=, _, [_, ast]}, acc) do - walk(ast, acc) + defp sanitize_arg({:\\, _, [ast, _]}, acc) do + sanitize_arg(ast, acc) end - defp walk({var, [line: _], nil} = ast, acc) do + defp sanitize_arg({:=, _, [_, ast]}, acc) do + sanitize_arg(ast, acc) + end + + defp sanitize_arg({var, [line: _], nil} = ast, acc) do case "#{var}" do "_" <> _ -> acc _ -> [ast | acc] end end - defp walk(_ast, acc) do + defp sanitize_arg(_ast, acc) do acc end - # MFA cache: `{module, function, args}` - defp cache_block({:{}, _, [mod, fun, cache_args]}, args, ctx) do - quote do - unquote(mod).unquote(fun)( - unquote(ctx.module), - unquote(ctx.name), - unquote(args), - 
unquote_splicing(cache_args) - ) - end - end - - # Module implementing the cache behaviour (default) - defp cache_block({_, _, _} = cache, _args, _ctx) do - quote(do: unquote(cache)) - end - - defp keygen_block(attrs, args, ctx) do - cond do - key = Keyword.get(attrs, :key) -> + defp keygen_block(attrs, caching_opts) do + case Keyword.fetch(attrs, :key) do + {:ok, key} -> quote(do: unquote(key)) - keygen = Keyword.get(attrs, :key_generator) -> - keygen_call(keygen, ctx, args) - - true -> - quote do - cache.__default_key_generator__().generate( - unquote(ctx.module), - unquote(ctx.name), - unquote(args) - ) - end - end - end + :error -> + generator = Keyword.fetch!(caching_opts, :default_key_generator) - # MFA key-generator: `{module, function, args}` - defp keygen_call({:{}, _, [mod, fun, keygen_args]}, _ctx, _args) do - quote do - unquote(mod).unquote(fun)(unquote_splicing(keygen_args)) + quote(do: &unquote(generator).generate/1) end end - # Key-generator tuple `{module, args}`, where the `module` implements - # the key-generator behaviour - defp keygen_call({{_, _, _} = mod, keygen_args}, ctx, _args) when is_list(keygen_args) do - quote do - unquote(mod).generate(unquote(ctx.module), unquote(ctx.name), unquote(keygen_args)) - end - end - - # Key-generator module implementing the behaviour - defp keygen_call({_, _, _} = keygen, ctx, args) do - quote do - unquote(keygen).generate(unquote(ctx.module), unquote(ctx.name), unquote(args)) - end - end - - defp action_block(:cacheable, block, attrs, keygen) do + defp action_block(:cacheable, block, attrs, keygen, on_error, match) do references = Keyword.get(attrs, :references) quote do @@ -921,216 +1068,282 @@ if Code.ensure_loaded?(Decorator.Define) do unquote(keygen), unquote(references), opts, - on_error, - match, + unquote(match), + unquote(on_error), fn -> unquote(block) end ) end end - defp action_block(:cache_put, block, attrs, keygen) do - keys = get_keys(attrs) - - key = - if is_list(keys) and length(keys) > 0, - 
do: {:"$keys", keys}, - else: keygen + defp action_block(:cache_put, block, attrs, keygen, on_error, match) do + key = get_key(attrs, keygen) quote do result = unquote(block) - unquote(__MODULE__).run_cmd( - unquote(__MODULE__), - :eval_match, - [result, match, cache, unquote(key), opts], - on_error, - result + unquote(__MODULE__).eval_cache_put( + cache, + unquote(key), + result, + opts, + unquote(on_error), + unquote(match) ) result end end - defp action_block(:cache_evict, block, attrs, keygen) do - before_invocation? = attrs[:before_invocation] || false - - eviction = eviction_block(attrs, keygen) + defp action_block(:cache_evict, block, attrs, keygen, on_error, _match) do + before_invocation? = get_boolean(attrs, :before_invocation) + all_entries? = get_boolean(attrs, :all_entries) + key = get_key(attrs, keygen) - if is_boolean(before_invocation?) && before_invocation? do - quote do - unquote(eviction) - unquote(block) - end - else - quote do - result = unquote(block) - - unquote(eviction) - - result - end - end - end - - defp eviction_block(attrs, keygen) do - keys = get_keys(attrs) - all_entries? = attrs[:all_entries] || false - - cond do - is_boolean(all_entries?) && all_entries? 
-> - quote(do: unquote(__MODULE__).run_cmd(cache, :delete_all, [], on_error, 0)) - - is_list(keys) and length(keys) > 0 -> - delete_keys_block(keys) - - true -> - quote(do: unquote(__MODULE__).run_cmd(cache, :delete, [unquote(keygen)], on_error, :ok)) - end - end - - defp delete_keys_block(keys) do quote do - Enum.each(unquote(keys), &unquote(__MODULE__).run_cmd(cache, :delete, [&1], on_error, :ok)) + unquote(__MODULE__).eval_cache_evict( + cache, + unquote(key), + unquote(before_invocation?), + unquote(all_entries?), + unquote(on_error), + fn -> unquote(block) end + ) end end - defp get_keys(attrs) do - get_option( - attrs, - :keys, - "a list with at least one element", - &((is_list(&1) and length(&1) > 0) or is_nil(&1)) - ) + defp get_key(attrs, default) do + with keys when is_list(keys) and length(keys) > 0 <- + get_option( + attrs, + :keys, + "a list with at least one element", + &((is_list(&1) and length(&1) > 0) or is_nil(&1)), + default + ) do + {:"$keys", keys} + end end - defp on_error_opt(attrs) do + defp on_error_opt(attrs, default) do get_option( attrs, :on_error, ":raise or :nothing", &(&1 in [:raise, :nothing]), - :raise + default ) end - ## Helpers + defp get_boolean(attrs, key) do + get_option(attrs, key, "a boolean", &Kernel.is_boolean/1, false) + end + + ## Internal API + + # Inline common instructions + @compile {:inline, put_decorator_context: 1, get_decorator_context: 0, del_decorator_context: 0} @doc """ - Convenience function for evaluating the `cacheable` decorator in runtime. + Convenience function to set the decorator's context + for the current process. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. 
""" - @spec eval_cacheable( - module, - term, - references, - Keyword.t(), - on_error_opt, - match_fun, - (() -> term) - ) :: term - def eval_cacheable(cache, key, references, opts, on_error, match, block) - - def eval_cacheable(cache, key, nil, opts, on_error, match, block) do - with nil <- run_cmd(cache, :get, [key, opts], on_error) do - result = block.() - - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, key, opts], - on_error, - result - ) + @doc group: "Internal API" + @spec put_decorator_context(context()) :: :ok + def put_decorator_context(context) do + _ = Process.put({__MODULE__, :decorator_context}, context) - result - end + :ok end - def eval_cacheable(cache, key, references, opts, on_error, match, block) do - case run_cmd(cache, :get, [key, opts], on_error) do - nil -> - result = block.() - ref_key = eval_cacheable_ref(references, result) - - with true <- - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, ref_key, opts], - on_error, - result - ) do - :ok = cache_put(cache, key, ref_key, opts) - end + @doc """ + Convenience function to get the decorator's context + from the current process. - result + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec get_decorator_context() :: context() | nil + def get_decorator_context do + Process.get({__MODULE__, :decorator_context}) + end - keyref(cache: ref_cache, key: ref_key) -> - cache = ref_cache || cache + @doc """ + Convenience function to delete the decorator's context + from the current process. - with nil <- run_cmd(cache, :get, [ref_key, opts], on_error) do - result = block.() + **NOTE:** Internal purposes only. 
+ """ + @doc group: "Internal API" + @spec del_decorator_context() :: :ok + def del_decorator_context do + _ = Process.delete({__MODULE__, :decorator_context}) - run_cmd( - __MODULE__, - :eval_match, - [result, match, cache, ref_key, opts], - on_error, - result - ) + :ok + end - result - end + @doc """ + Convenience function for wrapping and/or encapsulating + the **cacheable** decorator logic. + + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec eval_cacheable(any(), any(), references(), keyword(), match(), on_error(), fun()) :: any() + def eval_cacheable(cache, key, references, opts, match, on_error, block_fun) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cacheable(cache, key, references, opts, match, on_error, block_fun) + end + + defp do_eval_cacheable(cache, key, nil, opts, match, on_error, block_fun) do + cache + |> do_apply(:fetch, [key, opts]) + |> handle_cacheable( + on_error, + block_fun, + &__MODULE__.eval_cache_put(cache, key, &1, opts, on_error, match) + ) + end - val -> - val + defp do_eval_cacheable(cache, key, references, opts, match, on_error, block_fun) do + cache + |> do_apply(:fetch, [key, opts]) + |> case do + {:ok, keyref(cache: nil, key: ref_key)} -> + eval_cacheable(cache, ref_key, nil, opts, match, on_error, block_fun) + + {:ok, keyref(cache: ref_cache, key: ref_key)} -> + eval_cacheable(ref_cache, ref_key, nil, opts, match, on_error, block_fun) + + other -> + other + |> handle_cacheable(on_error, block_fun, fn result -> + reference = eval_cacheable_ref(references, result) + + with true <- eval_cache_put(cache, reference, result, opts, on_error, match) do + :ok = cache_put(cache, key, reference, opts) + end + end) end end defp eval_cacheable_ref(references, result) do - with ref_fun when is_function(ref_fun, 1) <- references do - ref_fun.(result) - end - |> case do + case eval_function(references, result) do keyref() = ref -> ref 
ref_key -> keyref(key: ref_key) end end + defp handle_cacheable({:ok, value}, _on_error, _block_fun, _key_err_fun) do + value + end + + defp handle_cacheable({:error, %Nebulex.KeyError{}}, _on_error, block_fun, key_err_fun) do + result = block_fun.() + + _ = key_err_fun.(result) + + result + end + + defp handle_cacheable({:error, _}, :nothing, block_fun, _key_err_fun) do + block_fun.() + end + + defp handle_cacheable({:error, reason}, :raise, _block_fun, _key_err_fun) do + raise reason + end + @doc """ - Convenience function for evaluating the `:match` function in runtime. + Convenience function for wrapping and/or encapsulating + the **cache_evict** decorator logic. + + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec eval_cache_evict(any(), any(), boolean(), boolean(), on_error(), fun()) :: any() + def eval_cache_evict(cache, key, before_invocation?, all_entries?, on_error, block_fun) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cache_evict(cache, key, before_invocation?, all_entries?, on_error, block_fun) + end + + defp do_eval_cache_evict(cache, key, true, all_entries?, on_error, block_fun) do + _ = do_evict(all_entries?, cache, key, on_error) + + block_fun.() + end - **NOTE:** For internal purposes only. + defp do_eval_cache_evict(cache, key, false, all_entries?, on_error, block_fun) do + result = block_fun.() - **NOTE:** Workaround to avoid dialyzer warnings when using declarative - annotation-based caching via decorators. 
+ _ = do_evict(all_entries?, cache, key, on_error) + + result + end + + defp do_evict(true, cache, _key, on_error) do + run_cmd(cache, :delete_all, [nil, []], on_error) + end + + defp do_evict(false, cache, {:"$keys", keys}, on_error) do + Enum.each(keys, &__MODULE__.run_cmd(cache, :delete, [&1, []], on_error)) + end + + defp do_evict(false, cache, key, on_error) do + run_cmd(cache, :delete, [key, []], on_error) + end + + @doc """ + Convenience function for wrapping and/or encapsulating + the **cache_put** decorator logic. + + **NOTE:** Internal purposes only. """ - @spec eval_match(term, match_fun, module, term, Keyword.t()) :: boolean - def eval_match(result, match, cache, key, opts) + @doc group: "Internal API" + @spec eval_cache_put(any(), any(), any(), keyword(), on_error(), match()) :: any() + def eval_cache_put(cache, key, value, opts, on_error, match) do + context = get_decorator_context() + cache = eval_cache(cache, context) + key = eval_key(cache, key, context) + + do_eval_cache_put(cache, key, value, opts, on_error, match) + end - def eval_match(result, match, cache, keyref(cache: nil, key: key), opts) do - eval_match(result, match, cache, key, opts) + defp do_eval_cache_put(cache, keyref(cache: nil, key: key), value, opts, on_error, match) do + eval_cache_put(cache, key, value, opts, on_error, match) end - def eval_match(result, match, _cache, keyref(cache: ref_cache, key: key), opts) do - eval_match(result, match, ref_cache, key, opts) + defp do_eval_cache_put(_, keyref(cache: cache, key: key), value, opts, on_error, match) do + eval_cache_put(cache, key, value, opts, on_error, match) end - def eval_match(result, match, cache, key, opts) do - case match.(result) do - {true, value} -> - :ok = cache_put(cache, key, value, opts) + defp do_eval_cache_put(cache, key, value, opts, on_error, match) do + case eval_function(match, value) do + {true, cache_value} -> + _ = run_cmd(__MODULE__, :cache_put, [cache, key, cache_value, opts], on_error) true - {true, 
value, match_opts} -> - :ok = cache_put(cache, key, value, Keyword.merge(opts, match_opts)) + {true, cache_value, rt_opts} -> + _ = + run_cmd( + __MODULE__, + :cache_put, + [cache, key, cache_value, Keyword.merge(opts, rt_opts)], + on_error + ) true true -> - :ok = cache_put(cache, key, result, opts) + _ = run_cmd(__MODULE__, :cache_put, [cache, key, value, opts], on_error) true @@ -1140,40 +1353,100 @@ if Code.ensure_loaded?(Decorator.Define) do end @doc """ - Convenience function for cache_put annotation. + Convenience function for the `cache_put` decorator. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. """ - @spec cache_put(module, {:"$keys", term} | term, term, Keyword.t()) :: :ok + @doc group: "Internal API" + @spec cache_put(cache_value(), {:"$keys", any()} | any(), any(), keyword()) :: :ok def cache_put(cache, key, value, opts) def cache_put(cache, {:"$keys", keys}, value, opts) do - entries = for k <- keys, do: {k, value} - - cache.put_all(entries, opts) + do_apply(cache, :put_all, [Enum.map(keys, &{&1, value}), opts]) end def cache_put(cache, key, value, opts) do - cache.put(key, value, opts) + do_apply(cache, :put, [key, value, opts]) end @doc """ - Convenience function for ignoring cache errors when `:on_error` option - is set to `:nothing` + Convenience function for evaluating the `cache` argument. - **NOTE:** For internal purposes only. + **NOTE:** Internal purposes only. 
""" - @spec run_cmd(module, atom, [term], on_error_opt, term) :: term - def run_cmd(mod, fun, args, on_error, default \\ nil) + @doc group: "Internal API" + @spec eval_cache(any(), context()) :: cache_value() + def eval_cache(cache, ctx) - def run_cmd(mod, fun, args, :raise, _default) do - apply(mod, fun, args) + def eval_cache(cache, _ctx) when is_atom(cache), do: cache + def eval_cache(dynamic_cache() = cache, _ctx), do: cache + def eval_cache(cache, ctx) when is_function(cache, 1), do: cache.(ctx) + def eval_cache(cache, _ctx), do: raise_invalid_cache(cache) + + @doc """ + Convenience function for evaluating the `key` argument. + + **NOTE:** Internal purposes only. + """ + @doc group: "Internal API" + @spec eval_key(any(), any(), context()) :: any() + def eval_key(cache, key, ctx) + + def eval_key(_cache, key, ctx) when is_function(key, 1) do + key.(ctx) end - def run_cmd(mod, fun, args, :nothing, default) do + def eval_key(_cache, key, _ctx) do + key + end + + @doc """ + Convenience function for running a cache command. + + **NOTE:** Internal purposes only. 
+ """ + @spec run_cmd(module(), atom(), [any()], on_error()) :: any() + def run_cmd(cache, fun, args, on_error) + + def run_cmd(cache, fun, args, :nothing) do + do_apply(cache, fun, args) + end + + def run_cmd(cache, fun, args, :raise) do + with {:error, reason} <- do_apply(cache, fun, args) do + raise reason + end + end + + ## Private functions + + @compile {:inline, raise_invalid_cache: 1} + + defp eval_function(fun, arg) when is_function(fun, 1) do + fun.(arg) + end + + defp eval_function(fun, arg) when is_function(fun, 2) do + fun.(arg, get_decorator_context()) + end + + defp eval_function(other, _arg) do + other + end + + defp do_apply(dynamic_cache(cache: cache, name: name), fun, args) do + apply(cache, fun, [name | args]) + end + + defp do_apply(mod, fun, args) do apply(mod, fun, args) - rescue - _e -> default + end + + @spec raise_invalid_cache(any()) :: no_return() + defp raise_invalid_cache(cache) do + raise ArgumentError, + "invalid value for :cache option: expected " <> + "t:Nebulex.Caching.Decorators.cache/0, got: #{inspect(cache)}" end end end diff --git a/lib/nebulex/caching/key_generator.ex b/lib/nebulex/caching/key_generator.ex index e2a7f556..8603d69d 100644 --- a/lib/nebulex/caching/key_generator.ex +++ b/lib/nebulex/caching/key_generator.ex @@ -1,52 +1,15 @@ defmodule Nebulex.Caching.KeyGenerator do @moduledoc """ - Cache key generator. Used for creating a key based on the given module, - function name and its arguments (the module and function name are used - as context). + Cache key generator. See the default implementation `Nebulex.Caching.SimpleKeyGenerator`. 
- - ## Caveats when using the key generator - - Since the callback `c:generate/3` is invoked passing the calling module where - the annotated function is defined, the name of the annotated function, and the - arguments given to that annotated function, there are some caveats to keep in - mind: - - * Only arguments explicitly assigned to a variable will be included when - calling the callback `c:generate/3`. - * Ignored or underscored arguments will be ignored. - * Pattern-matching expressions without a variable assignment will be - ignored. If there is a pattern-matching, it has to be explicitly - assigned to a variable so it can be included when calling the - callback `c:generate/3`. - - For example, suppose you have a module with an annotated function: - - defmodule MyApp.SomeModule do - use Nebulex.Caching - - alias MyApp.{Cache, CustomKeyGenerator} - - @decorate cacheable(cache: Cache, key_generator: CustomKeyGenerator) - def get_something(x, _ignored, _, {_, _}, [_, _], %{a: a}, %{} = y) do - # Function's logic - end - end - - The generator will be invoked like so: - - MyKeyGenerator.generate(MyApp.SomeModule, :get_something, [x, y]) - - Based on the caveats described above, only the arguments `x` and `y` are - included when calling the callback `c:generate/3`. """ @typedoc "Key generator type" - @type t :: module + @type t() :: module() @doc """ - Generates a key for the given `module`, `function_name`, and its `args`. + Receives the decorator `context` as an argument and returns the generated key. """ - @callback generate(module, function_name :: atom, args :: [term]) :: term + @callback generate(Nebulex.Caching.Decorators.Context.t()) :: any() end diff --git a/lib/nebulex/caching/options.ex b/lib/nebulex/caching/options.ex new file mode 100644 index 00000000..9abb1c1c --- /dev/null +++ b/lib/nebulex/caching/options.ex @@ -0,0 +1,248 @@ +defmodule Nebulex.Caching.Options do + @moduledoc """ + Options for caching decorators. 
+ """ + + alias Nebulex.Cache.Options, as: CacheOpts + + # Options given to the __using__ macro + caching_opts = [ + cache: [ + type: :atom, + required: false, + doc: """ + Defines the cache all decorated functions in the module will use + by default. It can be overridden on each decorated function since + the `:cache` option is also available at the decorator level + (see ["Shared Options"](#module-shared-options)). + + See ["Default cache"](#module-default-cache) section + for more information. + """ + ], + on_error: [ + type: {:in, [:nothing, :raise]}, + type_doc: "`t:on_error/0`", + required: false, + default: :nothing, + doc: """ + Same as `:on_error` in the ["Shared Options"](#module-shared-options), + but applies to all decorated functions in a module as default. + """ + ], + default_key_generator: [ + type: + {:custom, CacheOpts, :__validate_behaviour__, + [Nebulex.Caching.KeyGenerator, "key-generator"]}, + type_doc: "`t:module/0`", + required: false, + default: Nebulex.Caching.SimpleKeyGenerator, + doc: """ + The default key-generator module the caching decorators will use. + """ + ] + ] + + # Shared decorator options + shared_opts = [ + cache: [ + type: :any, + type_doc: "`t:cache/0`", + required: true, + doc: """ + The cache to use (see `t:cache/0` for possible values). If configured, + it overrides the [default or global cache](#module-default-cache). + + An exception is raised if the `:cache` option is not provided in the + decorator declaration and is not configured globally when defining the + caching usage via `use Nebulex.Caching` either. + + See ["Cache configuration"](#module-cache-configuration) section + for more information. + """ + ], + key: [ + type: :any, + type_doc: "`t:key/0`", + required: false, + doc: """ + The cache access key the decorator will use when the function is invoked. + If the option is not provided, a default key is generated using the + default key generator. 
+ + See ["Key Generation"](#module-key-generation) section + for more information. + """ + ], + opts: [ + type: :keyword_list, + required: false, + default: [], + doc: """ + The options used by the decorator when invoking cache commands. + """ + ], + match: [ + type: {:or, [fun: 1, fun: 2]}, + type_doc: "`t:match/0`", + required: false, + doc: """ + An anonymous function to decide whether the value returned by the + evaluation of the decorated function's code block (provided as a first + argument) is cached or not. Optionally, the match function can receive + the decorator context as a second argument. The match function can return: + + * `true` - the value returned by the decorated function's block + is cached (the default). + * `{true, value}` - `value` is cached. This is useful to set what + exactly must be cached. + * `{true, value, opts}` - `value` is cached with the options given by + `opts`. This return allows us to set the value to be cached, as well + as the runtime options for storing it (e.g.: the `ttl`). + * `false` - No value is stored in the cache. + + The default match function looks like this: + + ```elixir + fn + {:error, _} -> false + :error -> false + _ -> true + end + ``` + + By default, if the evaluation of the decorated function's block returns + any of the following terms/values `:error` or `{:error, term}`, the + default match function returns `false` (caching is bypassed), otherwise, + `true` is returned (the returned value is cached). Keep in mind the + default match function may store a `nil` value if it is returned by + the decorated function. If you don't want to cache `nil` values, + you should provide a match function for it. + """ + ], + on_error: [ + type: {:in, [:nothing, :raise]}, + type_doc: "`t:on_error/0`", + required: false, + default: :nothing, + doc: """ + The decorators perform cache commands under the hood. When the option + is set to `:raise` and a cache command returns an error, an exception + is raised. 
Otherwise, the error is ignored, and the decorated function + is executed normally. + + If configured, it overrides the global or default value + (e.g.: `use Nebulex.Caching, on_error: ...`). + """ + ] + ] + + # cacheable options + cacheable_opts = [ + references: [ + type: {:or, [{:fun, 1}, {:fun, 2}, nil, :any]}, + type_doc: "`t:references/0`", + required: false, + default: nil, + doc: """ + Indicates the key given by the option `:key` references another key given + by the option `:references`. In other words, when it is present, this + option tells the `cacheable` decorator to store the decorated function's + block result under the referenced key given by the option `:references`, + and the referenced key under the key given by the option `:key`. + + See the ["Referenced keys"](#cacheable/3-referenced-keys) section below + for more information. + """ + ] + ] + + # cache_put options + cache_put_opts = [ + keys: [ + type: {:list, :any}, + required: false, + doc: """ + A list of keys to be stored after the function is invoked. + It overrides the `:key` option. + """ + ] + ] + + # cache_evict options + cache_evict_opts = [ + keys: [ + type: {:list, :any}, + required: false, + doc: """ + A list of keys to be evicted after or before the function is invoked. + It overrides the `:key` option. + """ + ], + all_entries: [ + type: :boolean, + required: false, + default: false, + doc: """ + Whether all the entries inside the cache are removed. + """ + ], + before_invocation: [ + type: :boolean, + required: false, + default: false, + doc: """ + Whether the eviction should occur before the function is invoked. 
+ """ + ] + ] + + # caching options schema + @caching_opts_schema NimbleOptions.new!(caching_opts) + + # shared options schema + @shared_opts_schema NimbleOptions.new!(shared_opts) + + # cacheable options schema + @cacheable_opts_schema NimbleOptions.new!(cacheable_opts) + + # cache_put options schema + @cache_put_opts_schema NimbleOptions.new!(cache_put_opts) + + # cache_evict options schema + @cache_evict_opts_schema NimbleOptions.new!(cache_evict_opts) + + ## Docs API + + @spec caching_options_docs() :: binary() + def caching_options_docs do + NimbleOptions.docs(@caching_opts_schema) + end + + @spec shared_options_docs() :: binary() + def shared_options_docs do + NimbleOptions.docs(@shared_opts_schema) + end + + @spec cacheable_options_docs() :: binary() + def cacheable_options_docs do + NimbleOptions.docs(@cacheable_opts_schema) + end + + @spec cache_put_options_docs() :: binary() + def cache_put_options_docs do + NimbleOptions.docs(@cache_put_opts_schema) + end + + @spec cache_evict_options_docs() :: binary() + def cache_evict_options_docs do + NimbleOptions.docs(@cache_evict_opts_schema) + end + + ## Validation API + + @spec validate_caching_opts!(keyword()) :: keyword() + def validate_caching_opts!(opts) do + CacheOpts.validate!(opts, @caching_opts_schema) + end +end diff --git a/lib/nebulex/caching/simple_key_generator.ex b/lib/nebulex/caching/simple_key_generator.ex index 3d4396bd..f2668aa5 100644 --- a/lib/nebulex/caching/simple_key_generator.ex +++ b/lib/nebulex/caching/simple_key_generator.ex @@ -4,40 +4,40 @@ defmodule Nebulex.Caching.SimpleKeyGenerator do It implementats a simple algorithm: - * If no params are given, return `0`. - * If only one param is given, return that param as key. - * If more than one param is given, return a key computed from the hashes - of all parameters (`:erlang.phash2(args)`). + * If no arguments are given, return `0`. + * If only one argument is given, return that argument as key. 
+ * If more than one argument is given, return a key computed + from the hash of all arguments (`:erlang.phash2(args)`). - > Based on the [default key generation in Spring Cache Abstraction](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/cache.html#cache-annotations-cacheable-default-key). - - This implementation aims to cover those simple/generic scenarios where the - key generated based on the arguments only, fulfill the needs. For example: + This approach works well for those cases where the decorated functions keep + the same arguments (same hash code). For example: defmodule MyApp.Users do - use Nebulex.Caching - - alias MayApp.Cache + use Nebulex.Caching, cache: MyApp.Cache - @decorate cacheable(cache: Cache) + @decorate cacheable() def get_user(id) do # logic for retrieving a user... end - @decorate cache_evict(cache: Cache) + @decorate cache_evict() def delete_user(id) do # logic for deleting a user... end end - The key generator will generate the same key for both, cacheable and - evict functions; since it is generated based on the arguments only. + The previous example works because the hash code of the arguments in both + decorated functions will be the same. 
""" @behaviour Nebulex.Caching.KeyGenerator + alias Nebulex.Caching.Decorators.Context + @impl true - def generate(_mod, _fun, []), do: 0 - def generate(_mod, _fun, [arg]), do: arg - def generate(_mod, _fun, args), do: :erlang.phash2(args) + def generate(context) + + def generate(%Context{args: []}), do: 0 + def generate(%Context{args: [arg]}), do: arg + def generate(%Context{args: args}), do: :erlang.phash2(args) end diff --git a/lib/nebulex/entry.ex b/lib/nebulex/entry.ex index 8bc41b68..99547f1c 100644 --- a/lib/nebulex/entry.ex +++ b/lib/nebulex/entry.ex @@ -9,7 +9,7 @@ defmodule Nebulex.Entry do defstruct key: nil, value: nil, touched: nil, - ttl: :infinity, + exp: :infinity, time_unit: :millisecond @typedoc """ @@ -18,11 +18,11 @@ defmodule Nebulex.Entry do The entry depends on the adapter completely, this struct/type aims to define the common fields. """ - @type t :: %__MODULE__{ + @type t() :: %__MODULE__{ key: any, value: any, touched: integer, - ttl: timeout, + exp: timeout, time_unit: System.time_unit() } @@ -39,7 +39,7 @@ defmodule Nebulex.Entry do "hello" """ - @spec encode(term, [term]) :: binary + @spec encode(any(), [any()]) :: binary() def encode(data, opts \\ []) do data |> :erlang.term_to_binary(opts) @@ -58,7 +58,7 @@ defmodule Nebulex.Entry do """ # sobelow_skip ["Misc.BinToTerm"] - @spec decode(binary, [term]) :: term + @spec decode(binary(), [any()]) :: any() def decode(data, opts \\ []) when is_binary(data) do data |> Base.decode64!() @@ -73,18 +73,14 @@ defmodule Nebulex.Entry do iex> Nebulex.Entry.expired?(%Nebulex.Entry{}) false - iex> Nebulex.Entry.expired?( - ...> %Nebulex.Entry{touched: Nebulex.Time.now() - 10, ttl: 1} - ...> ) + iex> now = Nebulex.Time.now() + iex> Nebulex.Entry.expired?(%Nebulex.Entry{touched: now, exp: now - 10}) true """ - @spec expired?(t) :: boolean - def expired?(%__MODULE__{ttl: :infinity}), do: false - - def expired?(%__MODULE__{touched: touched, ttl: ttl, time_unit: unit}) do - Time.now(unit) - touched >= ttl 
- end + @spec expired?(t()) :: boolean() + def expired?(%__MODULE__{exp: :infinity}), do: false + def expired?(%__MODULE__{exp: exp, time_unit: unit}), do: Time.now(unit) >= exp @doc """ Returns the remaining time-to-live. @@ -94,18 +90,13 @@ defmodule Nebulex.Entry do iex> Nebulex.Entry.ttl(%Nebulex.Entry{}) :infinity - iex> ttl = - ...> Nebulex.Entry.ttl( - ...> %Nebulex.Entry{touched: Nebulex.Time.now(), ttl: 100} - ...> ) + iex> now = Nebulex.Time.now() + iex> ttl = Nebulex.Entry.ttl(%Nebulex.Entry{touched: now, exp: now + 10}) iex> ttl > 0 true """ - @spec ttl(t) :: timeout - def ttl(%__MODULE__{ttl: :infinity}), do: :infinity - - def ttl(%__MODULE__{ttl: ttl, touched: touched, time_unit: unit}) do - ttl - (Time.now(unit) - touched) - end + @spec ttl(t()) :: timeout() + def ttl(%__MODULE__{exp: :infinity}), do: :infinity + def ttl(%__MODULE__{exp: exp, time_unit: unit}), do: exp - Time.now(unit) end diff --git a/lib/nebulex/exceptions.ex b/lib/nebulex/exceptions.ex index 3e5f536e..bb65154c 100644 --- a/lib/nebulex/exceptions.ex +++ b/lib/nebulex/exceptions.ex @@ -1,116 +1,149 @@ -defmodule Nebulex.RegistryLookupError do +defmodule Nebulex.Error do @moduledoc """ - Raised at runtime when the cache was not started or it does not exist. + This exception represents cache command execution errors. For example, + the command cannot be executed because the cache was not started or it + does not exist, or the adapter failed while executing it. + + ## Struct fields + + The exception struct itself is opaque, that is, not all fields are public. + The following are the public fields: + + * `:module` - a custom error formatter module. When it is present, + it invokes `module.format_error(reason)` to format the error reason. + + * `:reason` - a term representing the error reason. The value of this field + can be: + + * `:registry_lookup_error` - the cache cannot be retrieved from the + registry because it was not started or it does not exist. 
+ + * `:invalid_query` - (Queryable API) if the underlying adapter + cannot run a given query because it is invalid or not supported. + + * `:transaction_aborted` - (Transaction API) if a transaction + is aborted by the underlying adapter. + + * `:opts` - The options that are given to the exception when it is + created or raised. + """ - @type t :: %__MODULE__{message: binary, name: atom} + @typedoc "Error reason type" + @type reason() :: atom() | {atom(), any()} | Exception.t() + + @typedoc "Error type" + @type t() :: %__MODULE__{reason: reason(), module: module(), opts: keyword()} - defexception [:message, :name] + # Exception struct + defexception reason: nil, module: __MODULE__, opts: [] - @doc false + ## API + + @impl true def exception(opts) do - name = Keyword.fetch!(opts, :name) + {reason, opts} = Keyword.pop!(opts, :reason) + {module, opts} = Keyword.pop(opts, :module, __MODULE__) - msg = - "could not lookup Nebulex cache #{inspect(name)} because it was " <> - "not started or it does not exist" + %__MODULE__{reason: reason, module: module, opts: opts} + end - %__MODULE__{message: msg, name: name} + @impl true + def message(%__MODULE__{reason: reason, module: module, opts: opts}) do + module.format_error(reason, opts) end -end -defmodule Nebulex.KeyAlreadyExistsError do - @moduledoc """ - Raised at runtime when a key already exists in cache. 
- """ + ## Helpers - @type t :: %__MODULE__{key: term, cache: atom} + def format_error(:registry_lookup_error, opts) do + cache = Keyword.get(opts, :cache) - defexception [:key, :cache] + "could not lookup Nebulex cache #{inspect(cache)} because it was " <> + "not started or it does not exist" + end + + def format_error(:invalid_query, opts) do + cache = Keyword.get(opts, :cache) + query = Keyword.get(opts, :query) - @doc false - def message(%{key: key, cache: cache}) do - "key #{inspect(key)} already exists in cache #{inspect(cache)}" + "cache #{inspect(cache)} cannot execute invalid query #{inspect(query)}" end -end -defmodule Nebulex.QueryError do - @moduledoc """ - Raised at runtime when the query is invalid. - """ + def format_error(:transaction_aborted, opts) do + cache = Keyword.get(opts, :cache) + nodes = Keyword.get(opts, :nodes) - @type t :: %__MODULE__{message: binary} + "cache #{inspect(cache)} has aborted a transaction on nodes: #{inspect(nodes)}" + end - defexception [:message] + def format_error(exception, opts) when is_exception(exception) do + cache = Keyword.get(opts, :cache) - @doc false - def exception(opts) do - message = Keyword.fetch!(opts, :message) - query = Keyword.fetch!(opts, :query) + """ + the following exception occurred in the cache #{inspect(cache)}. - message = """ - #{message} in query: + #{Exception.format(:error, exception, []) |> String.replace("\n", "\n ")} - #{inspect(query, pretty: true)} """ + end - %__MODULE__{message: message} + def format_error(reason, opts) do + cache = Keyword.get(opts, :cache) + + "cache #{inspect(cache)} failed with reason: #{inspect(reason)}" end end -defmodule Nebulex.RPCMultiCallError do +defmodule Nebulex.KeyError do @moduledoc """ - Raised at runtime when a RPC multi_call error occurs. - """ + Raised at runtime when a key does not exist in cache. 
- @type t :: %__MODULE__{message: binary} + This exception denotes the cache command was executed, but there was + an issue with the requested key; for example, it was not found. - defexception [:message] + ## Struct fields - @doc false - def exception(opts) do - action = Keyword.fetch!(opts, :action) - errors = Keyword.fetch!(opts, :errors) - responses = Keyword.fetch!(opts, :responses) + The exception struct itself is opaque, that is, not all fields are public. + The following are the public fields: - message = """ - RPC error while executing action #{inspect(action)} + * `:cache` - the cache where the exception occurred. - Successful responses: + * `:key` - the key causing the error. - #{inspect(responses, pretty: true)} + * `:reason` - the two possible reasons are: `:not_found` and `:expired`. + Defaults to `:not_found`. - Remote errors: + """ - #{inspect(errors, pretty: true)} - """ + @typedoc "Error type" + @type t() :: %__MODULE__{cache: any(), key: any(), reason: atom()} - %__MODULE__{message: message} - end -end + # Exception struct + defexception cache: nil, key: nil, reason: :not_found -defmodule Nebulex.RPCError do - @moduledoc """ - Raised at runtime when a RPC error occurs. - """ + ## API - @type t :: %__MODULE__{reason: atom, node: node} + @impl true + def exception(opts) do + key = Keyword.fetch!(opts, :key) + cache = Keyword.fetch!(opts, :cache) + reason = Keyword.get(opts, :reason, :not_found) - defexception [:reason, :node] + %__MODULE__{key: key, cache: cache, reason: reason} + end - @doc false - def message(%__MODULE__{reason: reason, node: node}) do - format_reason(reason, node) + @impl true + def message(%__MODULE__{key: key, cache: cache, reason: reason}) do + format_reason(reason, key, cache) end - # :erpc.call/5 doesn't format error messages. 
- defp format_reason({:erpc, _} = reason, node) do - """ - The RPC operation failed on node #{inspect(node)} with reason: + ## Helpers - #{inspect(reason)} + defp format_reason(:not_found, key, cache) do + "key #{inspect(key)} not found in cache: #{inspect(cache)}" + end - See :erpc.call/5 for more information about the error reasons. - """ + defp format_reason(:expired, key, cache) do + "key #{inspect(key)} has expired in cache: #{inspect(cache)}" end end diff --git a/lib/nebulex/helpers.ex b/lib/nebulex/helpers.ex deleted file mode 100644 index 4e4b51c5..00000000 --- a/lib/nebulex/helpers.ex +++ /dev/null @@ -1,60 +0,0 @@ -defmodule Nebulex.Helpers do - # Module for general purpose helpers. - @moduledoc false - - ## API - - @spec get_option(Keyword.t(), atom, binary, (any -> boolean), term) :: term - def get_option(opts, key, expected, valid?, default \\ nil) - when is_list(opts) and is_atom(key) do - value = Keyword.get(opts, key, default) - - if valid?.(value) do - value - else - raise ArgumentError, "expected #{key}: to be #{expected}, got: #{inspect(value)}" - end - end - - @spec get_boolean_option(Keyword.t(), atom, boolean) :: term - def get_boolean_option(opts, key, default \\ false) - when is_list(opts) and is_atom(key) and is_boolean(default) do - value = Keyword.get(opts, key, default) - - if is_boolean(value) do - value - else - raise ArgumentError, "expected #{key}: to be boolean, got: #{inspect(value)}" - end - end - - @spec assert_behaviour(module, module, binary) :: module - def assert_behaviour(module, behaviour, msg \\ "module") do - if behaviour in module_behaviours(module, msg) do - module - else - raise ArgumentError, - "expected #{inspect(module)} to implement the behaviour #{inspect(behaviour)}" - end - end - - @spec module_behaviours(module, binary) :: [module] - def module_behaviours(module, msg) do - if Code.ensure_compiled(module) != {:module, module} do - raise ArgumentError, - "#{msg} #{inspect(module)} was not compiled, " <> - "ensure 
it is correct and it is included as a project dependency" - end - - for {:behaviour, behaviours} <- module.__info__(:attributes), - behaviour <- behaviours, - do: behaviour - end - - @spec normalize_module_name([atom | binary | number]) :: module - def normalize_module_name(list) when is_list(list) do - list - |> Enum.map(&Macro.camelize("#{&1}")) - |> Module.concat() - end -end diff --git a/lib/nebulex/hook.ex b/lib/nebulex/hook.ex deleted file mode 100644 index 4601102b..00000000 --- a/lib/nebulex/hook.ex +++ /dev/null @@ -1,269 +0,0 @@ -if Code.ensure_loaded?(Decorator.Define) do - defmodule Nebulex.Hook do - @moduledoc """ - Pre/Post Hooks - - Since `v2.0.0`, pre/post hooks are not supported and/or handled by `Nebulex` - itself. Hooks feature is not a common use-case and also it is something that - can be be easily implemented on top of the Cache at the application level. - - Nevertheless, to keep backward compatibility somehow, `Nebulex` provides the - next decorators for implementing pre/post hooks very easily. - - ## `before` decorator - - The `before` decorator is declared for performing a hook action or callback - before the annotated function is executed. - - @decorate before(fn %Nebulex.Hook{} = hook -> inspect(hook) end) - def some_fun(var) do - # logic ... - end - - ## `after_return` decorator - - The `after_return` decorator is declared for performing a hook action or - callback after the annotated function is executed and its return is passed - through the `return:` attribute. - - @decorate after_return(&inspect(&1.return)) - def some_fun(var) do - # logic ... - end - - ## `around` decorator - - The final kind of hook is `around` decorator. The `around` decorator runs - "around" the annotated function execution. It has the opportunity to do - work both **before** and **after** the function executes. This means the - given hook function is invoked twice, before and after the code-block is - evaluated. 
- - @decorate around(&inspect(&1.step)) - def some_fun(var) do - # logic ... - end - - ## Putting all together - - Suppose we want to track all cache calls (before and after they are called) - by logging them (including the execution time). In this case, we need to - provide a pre/post hook to log these calls. - - First of all, we have to create a module implementing the hook function: - - defmodule MyApp.Tracker do - use GenServer - - alias Nebulex.Hook - - require Logger - - @actions [:get, :put] - - ## API - - def start_link(opts \\\\ []) do - GenServer.start_link(__MODULE__, opts, name: __MODULE__) - end - - def track(%Hook{step: :before, name: name}) when name in @actions do - System.system_time(:microsecond) - end - - def track(%Hook{step: :after_return, name: name} = event) when name in @actions do - GenServer.cast(__MODULE__, {:track, event}) - end - - def track(hook), do: hook - - ## GenServer Callbacks - - @impl true - def init(_opts) do - {:ok, %{}} - end - - @impl true - def handle_cast({:track, %Hook{acc: start} = hook}, state) do - diff = System.system_time(:microsecond) - start - Logger.info("#=> #\{hook.module}.#\{hook.name}/#\{hook.arity}, Duration: #\{diff}") - {:noreply, state} - end - end - - And then, in the Cache: - - defmodule MyApp.Cache do - use Nebulex.Hook - @decorate_all around(&MyApp.Tracker.track/1) - - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Local - end - - Try it out: - - iex> MyApp.Cache.put 1, 1 - 10:19:47.736 [info] Elixir.MyApp.Cache.put/3, Duration: 27 - iex> MyApp.Cache.get 1 - 10:20:14.941 [info] Elixir.MyApp.Cache.get/2, Duration: 11 - - """ - - use Decorator.Define, before: 1, after_return: 1, around: 1 - - @enforce_keys [:step, :module, :name, :arity] - defstruct [:step, :module, :name, :arity, :return, :acc] - - @type t :: %__MODULE__{ - step: :before | :after_return, - module: Nebulex.Cache.t(), - name: atom, - arity: non_neg_integer, - return: term, - acc: term - } - - @type hook_fun :: (t -> 
term) - - alias Nebulex.Hook - - @doc """ - Before decorator. - - Intercepts any call to the annotated function and calls the given `fun` - before the logic is executed. - - ## Example - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate before(&inspect(&1)) - def some_fun(var) do - # logic ... - end - end - - """ - @spec before(hook_fun, term, map) :: term - def before(fun, block, context) do - with_hook([:before], fun, block, context) - end - - @doc """ - After-return decorator. - - Intercepts any call to the annotated function and calls the given `fun` - after the logic is executed, and the returned result is passed through - the `return:` attribute. - - ## Example - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate after_return(&inspect(&1)) - def some_fun(var) do - # logic ... - end - end - - """ - @spec after_return(hook_fun, term, map) :: term - def after_return(fun, block, context) do - with_hook([:after_return], fun, block, context) - end - - @doc """ - Around decorator. - - Intercepts any call to the annotated function and calls the given `fun` - before and after the logic is executed. The result of the first call to - the hook function is passed through the `acc:` attribute, so it can be - used in the next call (after return). Finally, as the `after_return` - decorator, the returned code-block evaluation is passed through the - `return:` attribute. - - ## Example - - defmodule MyApp.Profiling do - alias Nebulex.Hook - - def prof(%Hook{step: :before}) do - System.system_time(:microsecond) - end - - def prof(%Hook{step: :after_return, acc: start} = hook) do - :telemetry.execute( - [:my_app, :profiling], - %{duration: System.system_time(:microsecond) - start}, - %{module: hook.module, name: hook.name} - ) - end - end - - defmodule MyApp.Example do - use Nebulex.Hook - - @decorate around(&MyApp.Profiling.prof/1) - def some_fun(var) do - # logic ... 
- end - end - - """ - @spec around(hook_fun, term, map) :: term - def around(fun, block, context) do - with_hook([:before, :after_return], fun, block, context) - end - - defp with_hook(hooks, fun, block, context) do - quote do - hooks = unquote(hooks) - fun = unquote(fun) - - hook = %Nebulex.Hook{ - step: :before, - module: unquote(context.module), - name: unquote(context.name), - arity: unquote(context.arity) - } - - # eval before - acc = - if :before in hooks do - Hook.eval_hook(:before, fun, hook) - end - - # eval code-block - return = unquote(block) - - # eval after_return - if :after_return in hooks do - Hook.eval_hook( - :after_return, - fun, - %{hook | step: :after_return, return: return, acc: acc} - ) - end - - return - end - end - - @doc """ - This function is for internal purposes. - """ - @spec eval_hook(:before | :after_return, hook_fun, t) :: term - def eval_hook(step, fun, hook) do - fun.(hook) - rescue - e -> - msg = "hook execution failed on step #{inspect(step)} with error #{inspect(e)}" - reraise RuntimeError, msg, __STACKTRACE__ - end - end -end diff --git a/lib/nebulex/rpc.ex b/lib/nebulex/rpc.ex deleted file mode 100644 index a6b5ac88..00000000 --- a/lib/nebulex/rpc.ex +++ /dev/null @@ -1,254 +0,0 @@ -defmodule Nebulex.RPC do - @moduledoc """ - RPC utilities for distributed task execution. - - This module uses supervised tasks underneath `Task.Supervisor`. - - > **NOTE:** The approach by using distributed tasks will be deprecated - in the future in favor of `:erpc`. 
- """ - - @typedoc "Task supervisor" - @type task_sup :: Supervisor.supervisor() - - @typedoc "Task callback" - @type callback :: {module, atom, [term]} - - @typedoc "Group entry: node -> callback" - @type node_callback :: {node, callback} - - @typedoc "Node group" - @type node_group :: %{optional(node) => callback} | [node_callback] - - @typedoc "Reducer function spec" - @type reducer_fun :: ({:ok, term} | {:error, term}, node_callback | node, term -> term) - - @typedoc "Reducer spec" - @type reducer :: {acc :: term, reducer_fun} - - ## API - - @doc """ - Evaluates `apply(mod, fun, args)` on node `node` and returns the corresponding - evaluation result, or `{:badrpc, reason}` if the call fails. - - A timeout, in milliseconds or `:infinity`, can be given with a default value - of `5000`. It uses `Task.await/2` internally. - - ## Example - - iex> Nebulex.RPC.call(:my_task_sup, :node1, Kernel, :to_string, [1]) - "1" - - """ - @spec call(task_sup, node, module, atom, [term], timeout) :: term | {:badrpc, term} - def call(supervisor, node, mod, fun, args, timeout \\ 5000) do - rpc_call(supervisor, node, mod, fun, args, timeout) - end - - @doc """ - In contrast to a regular single-node RPC, a multicall is an RPC that is sent - concurrently from one client to multiple servers. The function evaluates - `apply(mod, fun, args)` on each `node_group` entry and collects the answers. - Then, evaluates the `reducer` function (set in the `opts`) on each answer. - - This function is similar to `:rpc.multicall/5`. - - ## Options - - * `:timeout` - A timeout, in milliseconds or `:infinity`, can be given with - a default value of `5000`. It uses `Task.yield_many/2` internally. - - * `:reducer` - Reducer function to be executed on each collected result. - (check out `reducer` type). 
- - ## Example - - iex> Nebulex.RPC.multi_call( - ...> :my_task_sup, - ...> %{ - ...> node1: {Kernel, :to_string, [1]}, - ...> node2: {Kernel, :to_string, [2]} - ...> }, - ...> timeout: 10_000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "2"] - - """ - @spec multi_call(task_sup, node_group, Keyword.t()) :: term - def multi_call(supervisor, node_group, opts \\ []) do - rpc_multi_call(supervisor, node_group, opts) - end - - @doc """ - Similar to `multi_call/3` but the same `node_callback` (given by `module`, - `fun`, `args`) is executed on all `nodes`; Internally it creates a - `node_group` with the same `node_callback` for each node. - - ## Options - - Same options as `multi_call/3`. - - ## Example - - iex> Nebulex.RPC.multi_call( - ...> :my_task_sup, - ...> [:node1, :node2], - ...> Kernel, - ...> :to_string, - ...> [1], - ...> timeout: 5000, - ...> reducer: { - ...> [], - ...> fn - ...> {:ok, res}, _node_callback, acc -> - ...> [res | acc] - ...> - ...> {:error, _}, _node_callback, acc -> - ...> acc - ...> end - ...> } - ...> ) - ["1", "1"] - - """ - @spec multi_call(task_sup, [node], module, atom, [term], Keyword.t()) :: term - def multi_call(supervisor, nodes, mod, fun, args, opts \\ []) do - rpc_multi_call(supervisor, nodes, mod, fun, args, opts) - end - - ## Helpers - - if Code.ensure_loaded?(:erpc) do - defp rpc_call(_supervisor, node, mod, fun, args, _timeout) when node == node() do - apply(mod, fun, args) - end - - defp rpc_call(_supervisor, node, mod, fun, args, timeout) do - :erpc.call(node, mod, fun, args, timeout) - rescue - e in ErlangError -> - case e.original do - {:exception, original, _} when is_struct(original) -> - reraise original, __STACKTRACE__ - - {:exception, original, _} -> - :erlang.raise(:error, original, __STACKTRACE__) - - other -> - reraise %Nebulex.RPCError{reason: other, node: node}, 
__STACKTRACE__ - end - end - - def rpc_multi_call(_supervisor, node_group, opts) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - timeout = opts[:timeout] || 5000 - - node_group - |> Enum.map(fn {node, {mod, fun, args}} = group -> - {:erpc.send_request(node, mod, fun, args), group} - end) - |> Enum.reduce(reducer_acc, fn {req_id, group}, acc -> - try do - res = :erpc.receive_response(req_id, timeout) - reducer_fun.({:ok, res}, group, acc) - rescue - exception -> - reducer_fun.({:error, exception}, group, acc) - catch - :exit, reason -> - reducer_fun.({:error, {:exit, reason}}, group, acc) - end - end) - end - - def rpc_multi_call(_supervisor, nodes, mod, fun, args, opts) do - {reducer_acc, reducer_fun} = opts[:reducer] || default_reducer() - - nodes - |> :erpc.multicall(mod, fun, args, opts[:timeout] || 5000) - |> :lists.zip(nodes) - |> Enum.reduce(reducer_acc, fn {res, node}, acc -> - reducer_fun.(res, node, acc) - end) - end - else - # TODO: This approach by using distributed tasks will be deprecated in the - # future in favor of `:erpc` which is proven to improve performance - # almost by 3x. 
- - defp rpc_call(_supervisor, node, mod, fun, args, _timeout) when node == node() do - apply(mod, fun, args) - rescue - # FIXME: this is because coveralls does not check this as covered - # coveralls-ignore-start - exception -> - {:badrpc, exception} - # coveralls-ignore-stop - end - - defp rpc_call(supervisor, node, mod, fun, args, timeout) do - {supervisor, node} - |> Task.Supervisor.async_nolink( - __MODULE__, - :call, - [supervisor, node, mod, fun, args, timeout] - ) - |> Task.await(timeout) - end - - defp rpc_multi_call(supervisor, node_group, opts) do - node_group - |> Enum.map(fn {node, {mod, fun, args}} -> - Task.Supervisor.async_nolink({supervisor, node}, mod, fun, args) - end) - |> handle_multi_call(node_group, opts) - end - - defp rpc_multi_call(supervisor, nodes, mod, fun, args, opts) do - rpc_multi_call(supervisor, Enum.map(nodes, &{&1, {mod, fun, args}}), opts) - end - - defp handle_multi_call(tasks, node_group, opts) do - {reducer_acc, reducer_fun} = Keyword.get(opts, :reducer, default_reducer()) - - tasks - |> Task.yield_many(opts[:timeout] || 5000) - |> :lists.zip(node_group) - |> Enum.reduce(reducer_acc, fn - {{_task, {:ok, res}}, group}, acc -> - reducer_fun.({:ok, res}, group, acc) - - {{_task, {:exit, reason}}, group}, acc -> - reducer_fun.({:error, {:exit, reason}}, group, acc) - - {{task, nil}, group}, acc -> - _ = Task.shutdown(task, :brutal_kill) - reducer_fun.({:error, :timeout}, group, acc) - end) - end - end - - defp default_reducer do - { - {[], []}, - fn - {:ok, res}, _node_callback, {ok, err} -> - {[res | ok], err} - - {kind, _} = error, node_callback, {ok, err} when kind in [:error, :exit, :throw] -> - {ok, [{error, node_callback} | err]} - end - } - end -end diff --git a/lib/nebulex/stats.ex b/lib/nebulex/stats.ex deleted file mode 100644 index 957af598..00000000 --- a/lib/nebulex/stats.ex +++ /dev/null @@ -1,46 +0,0 @@ -defmodule Nebulex.Stats do - @moduledoc """ - Stats data type. 
- - Stats struct defines two main keys: - - * `:measurements` - A map with the measurements provided by the underlying - adapter. - * `:metadata` - A map for including additional information; also provided - by the underlying adapter. - - ## Measurements - - The following measurements are expected to be present and fed by the - underlying adapter: - - * `:evictions` - When a cache entry is removed. - * `:expirations` - When a cache entry is expired. - * `:hits` - When a key is looked up in cache and found. - * `:misses` - When a key is looked up in cache but not found. - * `:updates` - When an existing cache entry is or updated. - * `:writes` - When a cache entry is inserted or overwritten. - - ## Metadata - - Despite the adapters can include any additional or custom metadata, It is - recommended they include the following keys: - - * `:cache` - The cache module, or the name (if an explicit name has been - given to the cache). - - **IMPORTANT:** Since the adapter may include any additional or custom - measurements, as well as metadata, it is recommended to check out the - adapter's documentation. 
- """ - - # Stats data type - defstruct measurements: %{}, - metadata: %{} - - @typedoc "Nebulex.Stats data type" - @type t :: %__MODULE__{ - measurements: %{optional(atom) => term}, - metadata: %{optional(atom) => term} - } -end diff --git a/lib/nebulex/telemetry.ex b/lib/nebulex/telemetry.ex index 3a48fe8f..2fca8450 100644 --- a/lib/nebulex/telemetry.ex +++ b/lib/nebulex/telemetry.ex @@ -7,12 +7,16 @@ defmodule Nebulex.Telemetry do @compile {:inline, execute: 3, span: 3, attach_many: 4, detach: 1} if Code.ensure_loaded?(:telemetry) do + @doc false defdelegate execute(event, measurements, metadata), to: :telemetry + @doc false defdelegate span(event_prefix, start_meta, span_fn), to: :telemetry + @doc false defdelegate attach_many(handler_id, events, fun, config), to: :telemetry + @doc false defdelegate detach(handler_id), to: :telemetry else @doc false diff --git a/lib/nebulex/telemetry/stats_handler.ex b/lib/nebulex/telemetry/stats_handler.ex deleted file mode 100644 index 141c560c..00000000 --- a/lib/nebulex/telemetry/stats_handler.ex +++ /dev/null @@ -1,109 +0,0 @@ -defmodule Nebulex.Telemetry.StatsHandler do - @moduledoc """ - Telemetry handler for aggregating cache stats; it relies on the default stats - implementation based on Erlang counters. See `Nebulex.Adapter.Stats`. - - This handler is used by the built-in local adapter when the option `:stats` - is set to `true`. 
- """ - - alias Nebulex.Adapter.Stats - - ## Handler - - @doc false - def handle_event(_event, _measurements, %{adapter_meta: %{stats_counter: ref}} = metadata, ref) do - update_stats(metadata) - end - - # coveralls-ignore-start - - def handle_event(_event, _measurements, _metadata, _ref) do - :ok - end - - # coveralls-ignore-stop - - defp update_stats(%{ - function_name: action, - result: :"$expired", - adapter_meta: %{stats_counter: ref} - }) - when action in [:get, :take, :ttl] do - :ok = Stats.incr(ref, :misses) - :ok = Stats.incr(ref, :evictions) - :ok = Stats.incr(ref, :expirations) - end - - defp update_stats(%{function_name: action, result: nil, adapter_meta: %{stats_counter: ref}}) - when action in [:get, :take, :ttl] do - :ok = Stats.incr(ref, :misses) - end - - defp update_stats(%{function_name: action, result: _, adapter_meta: %{stats_counter: ref}}) - when action in [:get, :ttl] do - :ok = Stats.incr(ref, :hits) - end - - defp update_stats(%{function_name: :take, result: _, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :hits) - :ok = Stats.incr(ref, :evictions) - end - - defp update_stats(%{ - function_name: :put, - args: [_, _, _, :replace, _], - result: true, - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :updates) - end - - defp update_stats(%{function_name: :put, result: true, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :writes) - end - - defp update_stats(%{ - function_name: :put_all, - result: true, - args: [entries | _], - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :writes, Enum.count(entries)) - end - - defp update_stats(%{function_name: :delete, result: _, adapter_meta: %{stats_counter: ref}}) do - :ok = Stats.incr(ref, :evictions) - end - - defp update_stats(%{ - function_name: :execute, - args: [:delete_all | _], - result: result, - adapter_meta: %{stats_counter: ref} - }) do - :ok = Stats.incr(ref, :evictions, result) - end - - defp 
update_stats(%{function_name: action, result: true, adapter_meta: %{stats_counter: ref}}) - when action in [:expire, :touch] do - :ok = Stats.incr(ref, :updates) - end - - defp update_stats(%{ - function_name: :update_counter, - args: [_, amount, _, default, _], - result: result, - adapter_meta: %{stats_counter: ref} - }) do - offset = if amount >= 0, do: -1, else: 1 - - if result + amount * offset === default do - :ok = Stats.incr(ref, :writes) - else - :ok = Stats.incr(ref, :updates) - end - end - - defp update_stats(_), do: :ok -end diff --git a/lib/nebulex/time.ex b/lib/nebulex/time.ex index c1191992..e0e960e7 100644 --- a/lib/nebulex/time.ex +++ b/lib/nebulex/time.ex @@ -44,7 +44,7 @@ defmodule Nebulex.Time do false """ - @spec timeout?(term) :: boolean + @spec timeout?(any()) :: boolean() def timeout?(timeout) do (is_integer(timeout) and timeout >= 0) or timeout == :infinity end diff --git a/lib/nebulex/utils.ex b/lib/nebulex/utils.ex new file mode 100644 index 00000000..a56066b9 --- /dev/null +++ b/lib/nebulex/utils.ex @@ -0,0 +1,117 @@ +defmodule Nebulex.Utils do + # Module for general purpose utilities. + @moduledoc false + + ## API + + @doc """ + A wrapper for `Keyword.get/3` but validates the returned value invoking + the function `valid?`. + + Raises an `ArgumentError` in case the validation fails. 
+ + ## Examples + + iex> Nebulex.Utils.get_option( + ...> [keys: [1, 2, 3]], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + [1, 2, 3] + + iex> Nebulex.Utils.get_option( + ...> [], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + nil + + iex> Nebulex.Utils.get_option( + ...> [keys: 123], + ...> :keys, + ...> "a list with at least one element", + ...> &((is_list(&1) and length(&1) > 0) or is_nil(&1)) + ...> ) + ** (ArgumentError) expected keys: to be a list with at least one element, got: 123 + + """ + @spec get_option(keyword(), atom(), binary(), (any() -> boolean()), any()) :: any() + def get_option(opts, key, expected, valid?, default \\ nil) + when is_list(opts) and is_atom(key) do + case Keyword.fetch(opts, key) do + {:ok, value} -> + if valid?.(value) do + value + else + raise ArgumentError, "expected #{key}: to be #{expected}, got: #{inspect(value)}" + end + + :error -> + default + end + end + + @doc """ + Returns the implemented behaviours for the given `module`. + """ + @spec module_behaviours(module()) :: [module()] + def module_behaviours(module) do + for {:behaviour, behaviours} <- module.__info__(:attributes), behaviour <- behaviours do + behaviour + end + end + + @doc """ + Concatenates a list of "camelized" aliases and returns a new alias. + + It handles binaries, atoms, and numbers. 
+ + ## Examples + + iex> Nebulex.Utils.camelize_and_concat([Foo, :bar]) + Foo.Bar + + iex> Nebulex.Utils.camelize_and_concat([Foo, "bar"]) + Foo.Bar + + iex> Nebulex.Utils.camelize_and_concat([Foo, "Bar", 1]) + :"Elixir.Foo.Bar.1" + + """ + @spec camelize_and_concat([atom() | binary() | number()]) :: atom() + def camelize_and_concat(list) when is_list(list) do + list + |> Enum.map(&Macro.camelize("#{&1}")) + |> Module.concat() + end + + ## Macros + + @doc false + defmacro unwrap_or_raise(call) do + quote do + case unquote(call) do + {:ok, value} -> value + {:error, reason} when is_exception(reason) -> raise reason + {:error, reason} -> raise Nebulex.Error, reason: reason + other -> other + end + end + end + + @doc false + defmacro wrap_ok(call) do + quote do + {:ok, unquote(call)} + end + end + + @doc false + defmacro wrap_error(exception, opts) do + quote do + {:error, unquote(exception).exception(unquote(opts))} + end + end +end diff --git a/mix.exs b/mix.exs index 69b84f51..814b945f 100644 --- a/mix.exs +++ b/mix.exs @@ -2,13 +2,13 @@ defmodule Nebulex.MixProject do use Mix.Project @source_url "https://github.com/cabol/nebulex" - @version "2.5.2" + @version "3.0.0-dev" def project do [ app: :nebulex, version: @version, - elixir: "~> 1.9", + elixir: "~> 1.11", elixirc_paths: elixirc_paths(Mix.env()), aliases: aliases(), deps: deps(), @@ -36,7 +36,7 @@ defmodule Nebulex.MixProject do ] end - defp elixirc_paths(:test), do: ["lib", "test/support", "test/dialyzer"] + defp elixirc_paths(:test), do: ["lib", "test/dialyzer"] defp elixirc_paths(_), do: ["lib"] def application do @@ -48,26 +48,28 @@ defmodule Nebulex.MixProject do defp deps do [ - {:shards, "~> 1.1", optional: true}, + # Required + {:nimble_options, "~> 0.5 or ~> 1.0"}, + + # Optional {:decorator, "~> 1.4", optional: true}, - {:telemetry, "~> 0.4 or ~> 1.0", optional: true}, + {:telemetry, "~> 1.2", optional: true}, # Test & Code Analysis - {:ex2ms, "~> 1.6", only: :test}, - {:mock, "~> 0.3", only: 
:test}, - {:excoveralls, "~> 0.14", only: :test}, - {:credo, "~> 1.6", only: [:dev, :test], runtime: false}, - {:dialyxir, "~> 1.2", only: [:dev, :test], runtime: false}, - {:sobelow, "~> 0.11", only: [:dev, :test], runtime: false}, - {:stream_data, "~> 0.5", only: [:dev, :test]}, + {:excoveralls, "~> 0.17", only: :test}, + {:credo, "~> 1.7", only: [:dev, :test], runtime: false}, + {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false}, + {:sobelow, "~> 0.13", only: [:dev, :test], runtime: false}, + {:stream_data, "~> 0.6", only: [:dev, :test]}, + {:mimic, "~> 1.7", only: :test}, + {:doctor, "~> 0.21", only: [:dev, :test]}, # Benchmark Test {:benchee, "~> 1.1", only: [:dev, :test]}, {:benchee_html, "~> 1.0", only: [:dev, :test]}, # Docs - {:ex_doc, "~> 0.28", only: [:dev, :test], runtime: false}, - {:inch_ex, "~> 2.0", only: :docs} + {:ex_doc, "~> 0.20", only: [:dev, :test], runtime: false} ] end @@ -78,8 +80,9 @@ defmodule Nebulex.MixProject do "format --check-formatted", "credo --strict", "coveralls.html", - "sobelow --exit --skip", - "dialyzer --format short" + "sobelow --skip --exit Low", + "dialyzer --format short", + "doctor" ] ] end @@ -108,13 +111,29 @@ defmodule Nebulex.MixProject do "guides/telemetry.md", "guides/migrating-to-v2.md", "guides/creating-new-adapter.md" + ], + groups_for_functions: [ + # Caching decorators + group_for_function("Decorator API"), + group_for_function("Decorator Helpers"), + group_for_function("Internal API"), + # Cache API + group_for_function("User callbacks"), + group_for_function("Runtime API"), + group_for_function("KV API"), + group_for_function("Query API"), + group_for_function("Persistence API"), + group_for_function("Transaction API"), + group_for_function("Info API") ] ] end + defp group_for_function(group), do: {String.to_atom(group), &(&1[:group] == group)} + defp dialyzer do [ - plt_add_apps: [:shards, :mix, :telemetry], + plt_add_apps: [:mix, :telemetry, :ex_unit], plt_file: {:no_warn, "priv/plts/" <> 
plt_file_name()}, flags: [ :unmatched_returns, diff --git a/mix.lock b/mix.lock index 1f8e278e..d1ccf14d 100644 --- a/mix.lock +++ b/mix.lock @@ -3,35 +3,26 @@ "benchee_html": {:hex, :benchee_html, "1.0.0", "5b4d24effebd060f466fb460ec06576e7b34a00fc26b234fe4f12c4f05c95947", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:benchee_json, "~> 1.0", [hex: :benchee_json, repo: "hexpm", optional: false]}], "hexpm", "5280af9aac432ff5ca4216d03e8a93f32209510e925b60e7f27c33796f69e699"}, "benchee_json": {:hex, :benchee_json, "1.0.0", "cc661f4454d5995c08fe10dd1f2f72f229c8f0fb1c96f6b327a8c8fc96a91fe5", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "da05d813f9123505f870344d68fb7c86a4f0f9074df7d7b7e2bb011a63ec231c"}, "bunt": {:hex, :bunt, "0.2.1", "e2d4792f7bc0ced7583ab54922808919518d0e57ee162901a16a1b6664ef3b14", [:mix], [], "hexpm", "a330bfb4245239787b15005e66ae6845c9cd524a288f0d141c148b02603777a5"}, - "certifi": {:hex, :certifi, "2.9.0", "6f2a475689dd47f19fb74334859d460a2dc4e3252a3324bd2111b8f0429e7e21", [:rebar3], [], "hexpm", "266da46bdb06d6c6d35fde799bcb28d36d985d424ad7c08b5bb48f5b5cdd4641"}, - "credo": {:hex, :credo, "1.7.0", "6119bee47272e85995598ee04f2ebbed3e947678dee048d10b5feca139435f75", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "6839fcf63d1f0d1c0f450abc8564a57c43d644077ab96f2934563e68b8a769d7"}, + "credo": {:hex, :credo, "1.7.1", "6e26bbcc9e22eefbff7e43188e69924e78818e2fe6282487d0703652bc20fd62", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: 
false]}], "hexpm", "e9871c6095a4c0381c89b6aa98bc6260a8ba6addccf7f6a53da8849c748a58a2"}, + "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, "decorator": {:hex, :decorator, "1.4.0", "a57ac32c823ea7e4e67f5af56412d12b33274661bb7640ec7fc882f8d23ac419", [:mix], [], "hexpm", "0a07cedd9083da875c7418dea95b78361197cf2bf3211d743f6f7ce39656597f"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, - "dialyxir": {:hex, :dialyxir, "1.3.0", "fd1672f0922b7648ff9ce7b1b26fcf0ef56dda964a459892ad15f6b4410b5284", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "00b2a4bcd6aa8db9dcb0b38c1225b7277dca9bc370b6438715667071a304696f"}, - "earmark_parser": {:hex, :earmark_parser, "1.4.32", "fa739a0ecfa34493de19426681b23f6814573faee95dfd4b4aafe15a7b5b32c6", [:mix], [], "hexpm", "b8b0dd77d60373e77a3d7e8afa598f325e49e8663a51bcc2b88ef41838cca755"}, + "dialyxir": {:hex, :dialyxir, "1.4.1", "a22ed1e7bd3a3e3f197b68d806ef66acb61ee8f57b3ac85fc5d57354c5482a93", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "84b795d6d7796297cca5a3118444b80c7d94f7ce247d49886e7c291e1ae49801"}, + "doctor": {:hex, :doctor, "0.21.0", "20ef89355c67778e206225fe74913e96141c4d001cb04efdeba1a2a9704f1ab5", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "a227831daa79784eb24cdeedfa403c46a4cb7d0eab0e31232ec654314447e4e0"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.37", "2ad73550e27c8946648b06905a57e4d454e4d7229c2dafa72a0348c99d8be5f7", [:mix], [], "hexpm", "6b19783f2802f039806f375610faa22da130b8edc21209d0bff47918bb48360e"}, "erlex": {:hex, :erlex, "0.2.6", 
"c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, - "ex2ms": {:hex, :ex2ms, "1.6.1", "66d472eb14da43087c156e0396bac3cc7176b4f24590a251db53f84e9a0f5f72", [:mix], [], "hexpm", "a7192899d84af03823a8ec2f306fa858cbcce2c2e7fd0f1c49e05168fb9c740e"}, - "ex_doc": {:hex, :ex_doc, "0.29.4", "6257ecbb20c7396b1fe5accd55b7b0d23f44b6aa18017b415cb4c2b91d997729", [:mix], [{:earmark_parser, "~> 1.4.31", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "2c6699a737ae46cb61e4ed012af931b57b699643b24dabe2400a8168414bc4f5"}, - "excoveralls": {:hex, :excoveralls, "0.16.1", "0bd42ed05c7d2f4d180331a20113ec537be509da31fed5c8f7047ce59ee5a7c5", [:mix], [{:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "dae763468e2008cf7075a64cb1249c97cb4bc71e236c5c2b5e5cdf1cfa2bf138"}, + "ex_doc": {:hex, :ex_doc, "0.30.6", "5f8b54854b240a2b55c9734c4b1d0dd7bdd41f71a095d42a70445c03cf05a281", [:mix], [{:earmark_parser, "~> 1.4.31", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "bd48f2ddacf4e482c727f9293d9498e0881597eae6ddc3d9562bd7923375109f"}, + "excoveralls": {:hex, :excoveralls, "0.17.1", "83fa7906ef23aa7fc8ad7ee469c357a63b1b3d55dd701ff5b9ce1f72442b2874", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "95bc6fda953e84c60f14da4a198880336205464e75383ec0f570180567985ae0"}, "file_system": {:hex, :file_system, "0.2.10", 
"fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"}, - "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~> 2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, - "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, - "inch_ex": {:hex, :inch_ex, "2.0.0", "24268a9284a1751f2ceda569cd978e1fa394c977c45c331bb52a405de544f4de", [:mix], [{:bunt, "~> 0.2", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "96d0ec5ecac8cf63142d02f16b7ab7152cf0f0f1a185a80161b758383c9399a8"}, - "jason": {:hex, :jason, "1.4.0", "e855647bc964a44e2f67df589ccf49105ae039d4179db7f6271dfd3843dc27e6", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "79a3791085b2a0f743ca04cec0f7be26443738779d09302e01318f97bdb82121"}, + "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", 
"fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, "makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, "makeup_elixir": {:hex, :makeup_elixir, "0.16.1", "cc9e3ca312f1cfeccc572b37a09980287e243648108384b97ff2b76e505c3555", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "e127a341ad1b209bd80f7bd1620a15693a9908ed780c3b763bccf7d200c767c6"}, - "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, - "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, - "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, - "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, - "mock": {:hex, :mock, "0.3.7", "75b3bbf1466d7e486ea2052a73c6e062c6256fb429d6797999ab02fa32f29e03", [:mix], [{:meck, "~> 0.9.2", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "4da49a4609e41fd99b7836945c26f373623ea968cfb6282742bcb94440cf7e5c"}, + "makeup_erlang": {:hex, :makeup_erlang, "0.1.2", "ad87296a092a46e03b7e9b0be7631ddcf64c790fa68a9ef5323b6cbb36affc72", [:mix], [{:makeup, "~> 1.0", 
[hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "f3f5a1ca93ce6e092d92b6d9c049bcda58a3b617a8d888f8e7231c85630e8108"}, + "mimic": {:hex, :mimic, "1.7.4", "cd2772ffbc9edefe964bc668bfd4059487fa639a5b7f1cbdf4fd22946505aa4f", [:mix], [], "hexpm", "437c61041ecf8a7fae35763ce89859e4973bb0666e6ce76d75efc789204447c3"}, + "nimble_options": {:hex, :nimble_options, "1.0.2", "92098a74df0072ff37d0c12ace58574d26880e522c22801437151a159392270e", [:mix], [], "hexpm", "fd12a8db2021036ce12a309f26f564ec367373265b53e25403f0ee697380f1b8"}, "nimble_parsec": {:hex, :nimble_parsec, "1.3.1", "2c54013ecf170e249e9291ed0a62e5832f70a476c61da16f6aac6dca0189f2af", [:mix], [], "hexpm", "2682e3c0b2eb58d90c6375fc0cc30bc7be06f365bf72608804fb9cffa5e1b167"}, - "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, - "shards": {:hex, :shards, "1.1.0", "ed3032e63ae99f0eaa6d012b8b9f9cead48b9a810b3f91aeac266cfc4118eff6", [:make, :rebar3], [], "hexpm", "1d188e565a54a458a7a601c2fd1e74f5cfeba755c5a534239266d28b7ff124c7"}, - "sobelow": {:hex, :sobelow, "0.12.2", "45f4d500e09f95fdb5a7b94c2838d6b26625828751d9f1127174055a78542cf5", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "2f0b617dce551db651145662b84c8da4f158e7abe049a76daaaae2282df01c5d"}, - "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.6", "cf344f5692c82d2cd7554f5ec8fd961548d4fd09e7d22f5b62482e5aeaebd4b0", [:make, :mix, :rebar3], [], "hexpm", "bdb0d2471f453c88ff3908e7686f86f9be327d065cc1ec16fa4540197ea04680"}, + "sobelow": {:hex, :sobelow, "0.13.0", "218afe9075904793f5c64b8837cc356e493d88fddde126a463839351870b8d1e", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cd6e9026b85fc35d7529da14f95e85a078d9dd1907a9097b3ba6ac7ebbe34a0d"}, "statistex": {:hex, :statistex, "1.0.0", 
"f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, - "stream_data": {:hex, :stream_data, "0.5.0", "b27641e58941685c75b353577dc602c9d2c12292dd84babf506c2033cd97893e", [:mix], [], "hexpm", "012bd2eec069ada4db3411f9115ccafa38540a3c78c4c0349f151fc761b9e271"}, + "stream_data": {:hex, :stream_data, "0.6.0", "e87a9a79d7ec23d10ff83eb025141ef4915eeb09d4491f79e52f2562b73e5f47", [:mix], [], "hexpm", "b92b5031b650ca480ced047578f1d57ea6dd563f5b57464ad274718c9c29501c"}, "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, - "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, } diff --git a/test/dialyzer/caching_decorators.ex b/test/dialyzer/caching_decorators.ex index 14ae56ef..bd1bcec9 100644 --- a/test/dialyzer/caching_decorators.ex +++ b/test/dialyzer/caching_decorators.ex @@ -4,29 +4,47 @@ defmodule Nebulex.Dialyzer.CachingDecorators do defmodule Account do @moduledoc false - defstruct [:id, :username, :password] - @type t :: %__MODULE__{} + defstruct [:id, :username, :password, :email] + + @type t() :: %__MODULE__{} end + @cache Cache @ttl :timer.seconds(3600) ## Annotated Functions - @spec get_account(integer) :: Account.t() - @decorate cacheable(cache: Cache, key: {Account, id}) + @spec get_account(integer()) :: Account.t() + @decorate cacheable(cache: @cache, key: {Account, id}) def get_account(id) do %Account{id: id} end - @spec get_account_by_username(binary) :: Account.t() - @decorate cacheable(cache: Cache, key: {Account, username}, opts: [ttl: @ttl]) + @spec get_account_by_username(binary()) :: Account.t() + @decorate cacheable( + cache: 
dynamic_cache(@cache, Cache), + key: {Account, username}, + references: & &1.id, + opts: [ttl: @ttl] + ) def get_account_by_username(username) do %Account{username: username} end + @spec get_account_by_email(Account.t()) :: Account.t() + @decorate cacheable( + cache: YetAnotherCache, + key: email, + references: &keyref(Cache, &1.id), + opts: [ttl: @ttl] + ) + def get_account_by_email(%Account{email: email} = acct) do + %{acct | email: email} + end + @spec update_account(Account.t()) :: {:ok, Account.t()} @decorate cache_put( - cache: Cache, + cache: @cache, keys: [{Account, acct.id}, {Account, acct.username}], match: &match/1, opts: [ttl: @ttl] @@ -35,33 +53,33 @@ defmodule Nebulex.Dialyzer.CachingDecorators do {:ok, acct} end - @spec update_account_by_id(binary, %{optional(atom) => term}) :: {:ok, Account.t()} + @spec update_account_by_id(binary(), %{optional(atom()) => any()}) :: {:ok, Account.t()} @decorate cache_put(cache: Cache, key: {Account, id}, match: &match/1, opts: [ttl: @ttl]) def update_account_by_id(id, attrs) do {:ok, struct(Account, Map.put(attrs, :id, id))} end @spec delete_account(Account.t()) :: Account.t() - @decorate cache_evict(cache: Cache, keys: [{Account, acct.id}, {Account, acct.username}]) + @decorate cache_evict( + cache: &cache_fun/1, + keys: [{Account, acct.id}, {Account, acct.username}] + ) def delete_account(%Account{} = acct) do acct end - @spec delete_all_accounts(term) :: :ok - @decorate cache_evict(cache: Cache, all_entries: true) + @spec delete_all_accounts(any()) :: any() + @decorate cache_evict(cache: &cache_fun/1, all_entries: true) def delete_all_accounts(filter) do filter end - @spec get_user_key(integer) :: binary - @decorate cacheable( - cache: {__MODULE__, :dynamic_cache, [:dynamic]}, - key_generator: {__MODULE__, [id]} - ) + @spec get_user_key(binary()) :: binary() + @decorate cacheable(cache: &cache_fun/1, key: &generate_key/1) def get_user_key(id), do: id - @spec update_user_key(integer) :: binary - @decorate 
cacheable(cache: Cache, key_generator: {__MODULE__, :generate_key, [id]}) + @spec update_user_key(binary()) :: binary() + @decorate cacheable(cache: Cache, key: &generate_key({"custom", &1.args})) def update_user_key(id), do: id ## Helpers @@ -69,9 +87,11 @@ defmodule Nebulex.Dialyzer.CachingDecorators do defp match({:ok, _} = ok), do: {true, ok} defp match({:error, _}), do: false - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) + def generate_key(ctx), do: :erlang.phash2(ctx) - def generate_key(args), do: :erlang.phash2(args) + def cache_fun(ctx) do + _ = send(self(), ctx) - def dynamic_cache(_, _, _, _), do: Cache + Cache + end end diff --git a/test/mix/nebulex_test.exs b/test/mix/nebulex_test.exs index 505aa21c..b319798d 100644 --- a/test/mix/nebulex_test.exs +++ b/test/mix/nebulex_test.exs @@ -1,14 +1,15 @@ defmodule Mix.NebulexTest do use ExUnit.Case, async: true + use Mimic import Mix.Nebulex - import Mock test "fail because umbrella project" do - with_mock Mix.Project, umbrella?: fn -> true end do - assert_raise Mix.Error, ~r"Cannot run task", fn -> - no_umbrella!("nebulex.gen.cache") - end + Mix.Project + |> expect(:umbrella?, fn -> true end) + + assert_raise Mix.Error, ~r"Cannot run task", fn -> + no_umbrella!("nebulex.gen.cache") end end end diff --git a/test/nebulex/adapter_test.exs b/test/nebulex/adapter_test.exs new file mode 100644 index 00000000..d594f21a --- /dev/null +++ b/test/nebulex/adapter_test.exs @@ -0,0 +1,31 @@ +defmodule Nebulex.AdapterTest do + use ExUnit.Case, async: true + + describe "defcommand/2" do + test "ok: function is created" do + defmodule Test1 do + import Nebulex.Adapter, only: [defcommand: 1, defcommand: 2] + + defcommand c1(a1) + + defcommand c2(a1), command: :c11 + + defcommand c3(a1), command: :c11, largs: [:l], rargs: [:r] + end + end + end + + describe "defcommandp/2" do + test "ok: function is created" do + defmodule Test2 do + import Nebulex.Adapter, only: [defcommandp: 1, defcommandp: 2] + + 
defcommandp c1(a1) + + defcommandp c2(a1), command: :c11 + + defcommandp c3(a1), command: :c11, largs: [:l], rargs: [:r] + end + end + end +end diff --git a/test/nebulex/adapters/local/generation_test.exs b/test/nebulex/adapters/local/generation_test.exs deleted file mode 100644 index 8f2f42c7..00000000 --- a/test/nebulex/adapters/local/generation_test.exs +++ /dev/null @@ -1,380 +0,0 @@ -defmodule Nebulex.Adapters.Local.GenerationTest do - use ExUnit.Case, async: true - - defmodule LocalWithSizeLimit do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, - gc_interval: :timer.hours(1) - end - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Adapters.Local.GenerationTest.LocalWithSizeLimit - alias Nebulex.TestCache.Cache - - describe "init" do - test "ok: with default options" do - assert {:ok, _pid} = LocalWithSizeLimit.start_link() - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: nil, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: nil, - gc_heartbeat_ref: nil, - gc_interval: nil, - max_size: nil, - meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "ok: with custom options" do - assert {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 10, - max_size: 10, - allocated_memory: 1000 - ) - - assert %Nebulex.Adapters.Local.Generation{ - allocated_memory: 1000, - backend: :ets, - backend_opts: [ - :set, - :public, - {:keypos, 2}, - {:read_concurrency, true}, - {:write_concurrency, true} - ], - gc_cleanup_max_timeout: 600_000, - gc_cleanup_min_timeout: 10_000, - gc_cleanup_ref: gc_cleanup_ref, - gc_heartbeat_ref: gc_heartbeat_ref, - gc_interval: 10, - max_size: 10, - 
meta_tab: meta_tab, - stats_counter: nil - } = Generation.get_state(LocalWithSizeLimit) - - assert is_reference(gc_cleanup_ref) - assert is_reference(gc_heartbeat_ref) - assert is_reference(meta_tab) - - :ok = LocalWithSizeLimit.stop() - end - - test "error: invalid gc_cleanup_min_timeout" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {:shutdown, {_, _, {:shutdown, {_, _, {%ArgumentError{message: err}, _}}}}}} = - LocalWithSizeLimit.start_link( - gc_interval: 3600, - gc_cleanup_min_timeout: -1, - gc_cleanup_max_timeout: -1 - ) - - assert err == "expected gc_cleanup_min_timeout: to be an integer > 0, got: -1" - end - end - - describe "gc" do - setup_with_dynamic_cache(Cache, :gc_test, - backend: :shards, - gc_interval: 1000, - compressed: true - ) - - test "create generations", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(1020) - assert generations_len(name) == 2 - - assert cache.delete_all() == 0 - - :ok = Process.sleep(1020) - assert generations_len(name) == 2 - end - - test "create new generation and reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - - :ok = Process.sleep(520) - assert generations_len(name) == 2 - end - - test "create new generation without reset timeout", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.new_generation(reset_timer: false) - end) - - assert generations_len(name) == 2 - - :ok = Process.sleep(500) - assert generations_len(name) == 2 - end - - test "reset timer", %{cache: cache, name: name} do - assert generations_len(name) == 1 - - :ok = Process.sleep(800) - - cache.with_dynamic_cache(name, fn -> - cache.reset_generation_timer() - end) - - :ok = 
Process.sleep(220) - assert generations_len(name) == 1 - - :ok = Process.sleep(1000) - assert generations_len(name) == 2 - end - end - - describe "allocated memory" do - test "cleanup is triggered when max generation size is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100_000, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - :ok = Generation.realloc(LocalWithSizeLimit, mem_size * 2) - - # triggers the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - :ok = flood_cache(mem_size, mem_size * 2) - assert generations_len(LocalWithSizeLimit) == 1 - assert_mem_size(:>) - - # wait until the cleanup event is triggered - :ok = Process.sleep(3100) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:<=) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - :ok = flood_cache(mem_size, mem_size * 2) - - wait_until(fn -> - assert generations_len(LocalWithSizeLimit) == 2 - assert_mem_size(:>) - end) - - # triggers the cleanup event - :ok = check_cache_size(LocalWithSizeLimit) - - assert generations_len(LocalWithSizeLimit) == 2 - - :ok = LocalWithSizeLimit.stop() - end - - test "cleanup while cache is being used" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - allocated_memory: 100, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 3000 - ) - - assert generations_len(LocalWithSizeLimit) == 1 - - tasks = for i <- 1..3, do: Task.async(fn -> task_fun(LocalWithSizeLimit, i) end) - - for _ <- 1..100 do - :ok = Process.sleep(10) - - LocalWithSizeLimit - |> Generation.server() - |> send(:cleanup) - end - - for task <- tasks, do: Task.shutdown(task) - - :ok = LocalWithSizeLimit.stop() - end - end - - 
describe "max size" do - test "cleanup is triggered when size limit is reached" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - gc_interval: 3_600_000, - max_size: 3, - gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Initially there should be only 1 generation and no entries - assert generations_len(LocalWithSizeLimit) == 1 - assert LocalWithSizeLimit.count_all() == 0 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Validate current size - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # There should be 2 generation now - assert generations_len(LocalWithSizeLimit) == 2 - - # The entries should be now in the older generation - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all() == 0 - - # Put some entries without exceeding the max size - _ = cache_put(LocalWithSizeLimit, 5..6) - - # Validate current size - assert LocalWithSizeLimit.count_all() == 2 - - # Wait the max cleanup timeout (timeout should be relative to the size) - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all() == 2 - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 7..8) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # The entries should be in the newer generation yet - assert LocalWithSizeLimit.count_all() == 4 - - # Wait the min cleanup timeout since max size is exceeded - :ok = Process.sleep(1100) - - # Cache should be empty now - assert LocalWithSizeLimit.count_all() == 0 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - end - - describe "cleanup cover" do - test "cleanup when gc_interval not set" do - {:ok, _pid} = - LocalWithSizeLimit.start_link( - max_size: 3, - 
gc_cleanup_min_timeout: 1000, - gc_cleanup_max_timeout: 1500 - ) - - # Put some entries to exceed the max size - _ = cache_put(LocalWithSizeLimit, 1..4) - - # Wait the max cleanup timeout - :ok = Process.sleep(1600) - - # assert not crashed - assert LocalWithSizeLimit.count_all() == 4 - - # Stop the cache - :ok = LocalWithSizeLimit.stop() - end - end - - ## Private Functions - - defp check_cache_size(cache) do - :cleanup = - cache - |> Generation.server() - |> send(:cleanup) - - :ok = Process.sleep(1000) - end - - defp flood_cache(mem_size, max_size) when mem_size > max_size do - :ok - end - - defp flood_cache(mem_size, max_size) when mem_size <= max_size do - :ok = - 100_000 - |> :rand.uniform() - |> LocalWithSizeLimit.put(generate_value(1000)) - - :ok = Process.sleep(500) - {mem_size, _} = Generation.memory_info(LocalWithSizeLimit) - flood_cache(mem_size, max_size) - end - - defp assert_mem_size(greater_or_less) do - {mem_size, max_size} = Generation.memory_info(LocalWithSizeLimit) - assert apply(Kernel, greater_or_less, [mem_size, max_size]) - end - - defp generate_value(n) do - for(_ <- 1..n, do: "a") - end - - defp generations_len(name) do - name - |> Generation.list() - |> length() - end - - defp task_fun(cache, i) do - :ok = cache.put("#{inspect(self())}.#{i}", i) - :ok = Process.sleep(1) - task_fun(cache, i + 1) - end -end diff --git a/test/nebulex/adapters/local_boolean_keys_test.exs b/test/nebulex/adapters/local_boolean_keys_test.exs deleted file mode 100644 index c66b99e3..00000000 --- a/test/nebulex/adapters/local_boolean_keys_test.exs +++ /dev/null @@ -1,101 +0,0 @@ -defmodule Nebulex.Adapters.LocalBooleanKeysTest do - use ExUnit.Case, async: true - - defmodule ETS do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule Shards do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - alias Nebulex.Adapters.LocalBooleanKeysTest.{ETS, Shards} - - setup do - {:ok, ets} = 
ETS.start_link() - {:ok, shards} = Shards.start_link(backend: :shards) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(ets), do: ETS.stop() - if Process.alive?(shards), do: Shards.stop() - end) - - {:ok, caches: [ETS, Shards]} - end - - describe "boolean keys" do - test "get and get_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: true, b: false) - - assert cache.get(:a) == true - assert cache.get(:b) == false - - assert cache.get_all([:a, :b]) == %{a: true, b: false} - end) - end - - test "take", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: true, b: false) - - assert cache.take(:a) == true - assert cache.take(:b) == false - - assert cache.get_all([:a, :b]) == %{} - end) - end - - test "delete true value", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, true) - - assert cache.get(:a) == true - assert cache.delete(:a) - assert cache.get(:a) == nil - end) - end - - test "delete false value", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, false) - - assert cache.get(:a) == false - assert cache.delete(:a) - assert cache.get(:a) == nil - end) - end - - test "put_new", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert cache.put_new(:a, true) - :ok = cache.put(:a, false) - refute cache.put_new(:a, false) - - assert cache.get(:a) == false - end) - end - - test "has_key?", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, true) - - assert cache.has_key?(:a) - refute cache.has_key?(:b) - end) - end - end - - ## Helpers - - defp for_all_caches(caches, fun) do - Enum.each(caches, fn cache -> - fun.(cache) - end) - end -end diff --git a/test/nebulex/adapters/local_duplicate_keys_test.exs b/test/nebulex/adapters/local_duplicate_keys_test.exs deleted file mode 100644 index d3927b70..00000000 --- a/test/nebulex/adapters/local_duplicate_keys_test.exs +++ /dev/null @@ -1,180 +0,0 
@@ -defmodule Nebulex.Adapters.LocalDuplicateKeysTest do - use ExUnit.Case, async: true - - defmodule ETS do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule Shards do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - import Ex2ms - - alias Nebulex.Adapters.LocalDuplicateKeysTest.{ETS, Shards} - - setup do - {:ok, ets} = ETS.start_link(backend_type: :duplicate_bag) - {:ok, shards} = Shards.start_link(backend: :shards, backend_type: :duplicate_bag) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(ets), do: ETS.stop() - if Process.alive?(shards), do: Shards.stop() - end) - - {:ok, caches: [ETS, Shards]} - end - - describe "duplicate keys" do - test "get and get_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.get(:a) == [1, 2, 2] - assert cache.get(:b) == [1, 2] - assert cache.get(:c) == 1 - - assert cache.get_all([:a, :b, :c]) == %{a: [1, 2, 2], b: [1, 2], c: 1} - end) - end - - test "take", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.take(:a) == [1, 2, 2] - assert cache.take(:b) == [1, 2] - assert cache.take(:c) == 1 - - assert cache.get_all([:a, :b, :c]) == %{} - end) - end - - test "delete", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - :ok = cache.put(:a, 2) - - assert cache.get(:a) == [1, 2, 2] - assert cache.delete(:a) - refute cache.get(:a) - end) - end - - test "put_new", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert cache.put_new(:a, 1) - :ok = cache.put(:a, 2) - refute cache.put_new(:a, 3) - - assert cache.get(:a) == [1, 2] - end) - end - - test "has_key?", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert cache.has_key?(:a) - 
refute cache.has_key?(:b) - end) - end - - test "ttl", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1, ttl: 5000) - :ok = cache.put(:a, 2, ttl: 10_000) - :ok = cache.put(:a, 3) - - [ttl1, ttl2, ttl3] = cache.ttl(:a) - assert ttl1 > 1000 - assert ttl2 > 6000 - assert ttl3 == :infinity - - refute cache.ttl(:b) - end) - end - - test "count_all and delete_all", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - assert cache.count_all() == 6 - assert cache.delete_all() == 6 - assert cache.count_all() == 0 - end) - end - - test "all and stream using match_spec queries", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put_all(a: 1, a: 2, a: 2, b: 1, b: 2, c: 1) - - test_ms = - fun do - {_, key, value, _, _} when value == 2 -> key - end - - res_stream = test_ms |> cache.stream() |> Enum.to_list() |> Enum.sort() - res_query = test_ms |> cache.all() |> Enum.sort() - - assert res_stream == [:a, :a, :b] - assert res_query == res_stream - end) - end - end - - describe "unsupported commands" do - test "replace", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.replace(:a, 1) - end - end) - end - - test "incr", %{caches: caches} do - for_all_caches(caches, fn cache -> - assert_raise ArgumentError, fn -> - cache.incr(:a) - end - end) - end - - test "expire", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.expire(:a, 5000) - end - end) - end - - test "touch", %{caches: caches} do - for_all_caches(caches, fn cache -> - :ok = cache.put(:a, 1) - :ok = cache.put(:a, 2) - - assert_raise ArgumentError, fn -> - cache.touch(:a) - end - end) - end - end - - ## Helpers - - defp for_all_caches(caches, fun) do - Enum.each(caches, fn cache -> - fun.(cache) - end) - end -end diff --git 
a/test/nebulex/adapters/local_ets_test.exs b/test/nebulex/adapters/local_ets_test.exs deleted file mode 100644 index 4619b5bc..00000000 --- a/test/nebulex/adapters/local_ets_test.exs +++ /dev/null @@ -1,20 +0,0 @@ -defmodule Nebulex.Adapters.LocalEtsTest do - use ExUnit.Case, async: true - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache(Cache, :local_with_ets, purge_batch_size: 10) - - describe "ets" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn _, meta -> - assert meta.backend == :ets - end) - end - end -end diff --git a/test/nebulex/adapters/local_shards_test.exs b/test/nebulex/adapters/local_shards_test.exs deleted file mode 100644 index 53c7366a..00000000 --- a/test/nebulex/adapters/local_shards_test.exs +++ /dev/null @@ -1,37 +0,0 @@ -defmodule Nebulex.Adapters.LocalWithShardsTest do - use ExUnit.Case, async: true - use Nebulex.LocalTest - use Nebulex.CacheTest - - import Nebulex.CacheCase - - alias Nebulex.Adapter - alias Nebulex.TestCache.Cache - - setup_with_dynamic_cache(Cache, :local_with_shards, backend: :shards) - - describe "shards" do - test "backend", %{name: name} do - Adapter.with_meta(name, fn _, meta -> - assert meta.backend == :shards - end) - end - - test "custom partitions" do - defmodule CustomPartitions do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - :ok = Application.put_env(:nebulex, CustomPartitions, backend: :shards, partitions: 2) - {:ok, _pid} = CustomPartitions.start_link() - - assert CustomPartitions.newer_generation() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - - :ok = CustomPartitions.stop() - end - end -end diff --git a/test/nebulex/adapters/multilevel_concurrency_test.exs b/test/nebulex/adapters/multilevel_concurrency_test.exs deleted file mode 100644 index eefcf01c..00000000 --- a/test/nebulex/adapters/multilevel_concurrency_test.exs 
+++ /dev/null @@ -1,159 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelConcurrencyTest do - use ExUnit.Case, async: true - - import Nebulex.CacheCase - - alias Nebulex.TestCache.Multilevel.L2 - - defmodule SleeperMock do - @moduledoc false - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - alias Nebulex.Adapters.Local - - @impl true - defmacro __before_compile__(_), do: :ok - - @impl true - defdelegate init(opts), to: Local - - def post(opts) do - with f when is_function(f) <- opts[:post] do - f.() - end - end - - @impl true - defdelegate get(meta, key, opts), to: Local - - @impl true - defdelegate put(meta, key, value, ttl, on_write, opts), to: Local - - @impl true - def delete(meta, key, opts) do - result = Local.delete(meta, key, opts) - post(opts) - result - end - - @impl true - defdelegate take(meta, key, opts), to: Local - - @impl true - defdelegate has_key?(meta, key), to: Local - - @impl true - defdelegate ttl(meta, key), to: Local - - @impl true - defdelegate expire(meta, key, ttl), to: Local - - @impl true - defdelegate touch(meta, key), to: Local - - @impl true - defdelegate update_counter(meta, key, amount, ttl, default, opts), to: Local - - @impl true - defdelegate get_all(meta, keys, opts), to: Local - - @impl true - defdelegate put_all(meta, entries, ttl, on_write, opts), to: Local - - @impl true - def execute(meta, operation, query, opts) do - result = Local.execute(meta, operation, query, opts) - post(opts) - result - end - - @impl true - defdelegate stream(meta, query, opts), to: Local - end - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: SleeperMock - end - - defmodule Multilevel do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - end - - @levels [ - {L1, name: :multilevel_concurrency_l1}, - {L2, name: :multilevel_concurrency_l2} - ] - - setup_with_cache(Multilevel, - model: :inclusive, - levels: @levels - ) - - describe "delete" do - 
test "deletes in reverse order", %{cache: cache} do - test_pid = self() - - assert :ok = cache.put("foo", "stale") - - task = - Task.async(fn -> - cache.delete("foo", - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - refute cache.get("foo") - send(task.pid, :continue) - assert Task.await(task) == :ok - assert cache.get("foo", level: 1) == nil - assert cache.get("foo", level: 2) == nil - end - end - - describe "delete_all" do - test "deletes in reverse order", %{cache: cache} do - test_pid = self() - - assert :ok = cache.put_all(%{a: "stale", b: "stale"}) - - task = - Task.async(fn -> - cache.delete_all(nil, - post: fn -> - send(test_pid, :deleted_in_l1) - - receive do - :continue -> :ok - after - 5000 -> - raise "Did not receive continue message" - end - end - ) - end) - - assert_receive :deleted_in_l1 - refute cache.get(:a) - refute cache.get(:b) - send(task.pid, :continue) - assert Task.await(task) == 4 - assert cache.get_all([:a, :b]) == %{} - end - end -end diff --git a/test/nebulex/adapters/multilevel_exclusive_test.exs b/test/nebulex/adapters/multilevel_exclusive_test.exs deleted file mode 100644 index 287242fb..00000000 --- a/test/nebulex/adapters/multilevel_exclusive_test.exs +++ /dev/null @@ -1,94 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelExclusiveTest do - use ExUnit.Case, async: true - use Nebulex.NodeCase - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_exclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_exclusive_l2, primary: 
[gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_exclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache(Multilevel, :multilevel_exclusive, - model: :exclusive, - levels: @levels - ) - - describe "multilevel exclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert :"#{name}_l1" - |> Generation.newer() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get(1) == 1 - assert Multilevel.get(2, return: :key) == 2 - assert Multilevel.get(3) == 3 - refute Multilevel.get(2, level: 1) - refute Multilevel.get(3, level: 1) - refute Multilevel.get(1, level: 2) - refute Multilevel.get(3, level: 2) - refute Multilevel.get(1, level: 3) - refute Multilevel.get(2, level: 3) - end - end - - describe "partitioned level" do - test "returns cluster nodes" do - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node()] - end - - test "joining new node" do - node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_exclusive, - model: :exclusive, - levels: @levels - ) - - # check cluster nodes - assert Cluster.get_nodes(:multilevel_exclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/multilevel_inclusive_test.exs b/test/nebulex/adapters/multilevel_inclusive_test.exs deleted file mode 100644 index 810455d1..00000000 --- a/test/nebulex/adapters/multilevel_inclusive_test.exs +++ /dev/null @@ -1,140 +0,0 @@ -defmodule Nebulex.Adapters.MultilevelInclusiveTest do - use ExUnit.Case, async: true - use 
Nebulex.NodeCase - use Nebulex.MultilevelTest - use Nebulex.Cache.QueryableTest - use Nebulex.Cache.TransactionTest - - import Nebulex.CacheCase - - alias Nebulex.Adapters.Local.Generation - alias Nebulex.Cache.Cluster - alias Nebulex.TestCache.Multilevel - alias Nebulex.TestCache.Multilevel.{L1, L2, L3} - - @gc_interval :timer.hours(1) - - @levels [ - { - L1, - name: :multilevel_inclusive_l1, gc_interval: @gc_interval, backend: :shards, partitions: 2 - }, - { - L2, - name: :multilevel_inclusive_l2, primary: [gc_interval: @gc_interval] - }, - { - L3, - name: :multilevel_inclusive_l3, - primary: [gc_interval: @gc_interval, backend: :shards, partitions: 2] - } - ] - - setup_with_dynamic_cache(Multilevel, :multilevel_inclusive, - model: :inclusive, - levels: @levels - ) - - describe "multilevel inclusive" do - test "returns partitions for L1 with shards backend", %{name: name} do - assert :"#{name}_l1" - |> Generation.newer() - |> :shards.table_meta() - |> :shards_meta.partitions() == 2 - end - - test "get" do - :ok = Process.sleep(2000) - :ok = Multilevel.put(1, 1, level: 1) - :ok = Multilevel.put(2, 2, level: 2) - :ok = Multilevel.put(3, 3, level: 3) - - assert Multilevel.get(1) == 1 - refute Multilevel.get(1, level: 2) - refute Multilevel.get(1, level: 3) - - assert Multilevel.get(2) == 2 - assert Multilevel.get(2, level: 1) == 2 - assert Multilevel.get(2, level: 2) == 2 - refute Multilevel.get(2, level: 3) - - assert Multilevel.get(3, level: 3) == 3 - refute Multilevel.get(3, level: 1) - refute Multilevel.get(3, level: 2) - - assert Multilevel.get(3) == 3 - assert Multilevel.get(3, level: 1) == 3 - assert Multilevel.get(3, level: 2) == 3 - assert Multilevel.get(3, level: 2) == 3 - end - - test "get boolean" do - :ok = Multilevel.put(1, true, level: 1) - :ok = Multilevel.put(2, false, level: 1) - - assert Multilevel.get(1) == true - assert Multilevel.get(2) == false - end - - test "fetched value is replicated with TTL on previous levels" do - assert 
Multilevel.put(:a, 1, ttl: 1000) == :ok - assert Multilevel.ttl(:a) > 0 - - :ok = Process.sleep(1100) - refute Multilevel.get(:a, level: 1) - refute Multilevel.get(:a, level: 2) - refute Multilevel.get(:a, level: 3) - - assert Multilevel.put(:b, 1, level: 3) == :ok - assert Multilevel.ttl(:b) == :infinity - assert Multilevel.expire(:b, 1000) - assert Multilevel.ttl(:b) > 0 - refute Multilevel.get(:b, level: 1) - refute Multilevel.get(:b, level: 2) - assert Multilevel.get(:b, level: 3) == 1 - - assert Multilevel.get(:b) == 1 - assert Multilevel.get(:b, level: 1) == 1 - assert Multilevel.get(:b, level: 2) == 1 - assert Multilevel.get(:b, level: 3) == 1 - - :ok = Process.sleep(1100) - refute Multilevel.get(:b, level: 1) - refute Multilevel.get(:b, level: 2) - refute Multilevel.get(:b, level: 3) - end - end - - describe "distributed levels" do - test "return cluster nodes" do - assert Cluster.get_nodes(:multilevel_inclusive_l2) == [node()] - assert Cluster.get_nodes(:multilevel_inclusive_l3) == [node()] - end - - test "joining new node" do - node = :"node1@127.0.0.1" - - {:ok, pid} = - start_cache(node, Multilevel, - name: :multilevel_inclusive, - model: :inclusive, - levels: @levels - ) - - # check cluster nodes - assert Cluster.get_nodes(:multilevel_inclusive_l3) == [node, node()] - - kv_pairs = for k <- 1..100, do: {k, k} - - Multilevel.transaction(fn -> - assert Multilevel.put_all(kv_pairs) == :ok - - for k <- 1..100 do - assert Multilevel.get(k) == k - end - end) - - :ok = stop_cache(:"node1@127.0.0.1", pid) - end - end -end diff --git a/test/nebulex/adapters/nil_test.exs b/test/nebulex/adapters/nil_test.exs index bbacb002..be8041d7 100644 --- a/test/nebulex/adapters/nil_test.exs +++ b/test/nebulex/adapters/nil_test.exs @@ -15,117 +15,104 @@ defmodule Nebulex.Adapters.NilTest do describe "entry" do property "put", %{cache: cache} do check all term <- term() do - refute cache.get(term) + assert cache.has_key?(term) == {:ok, false} - assert cache.replace(term, term) 
+ assert cache.replace(term, term) == {:ok, true} assert cache.put(term, term) == :ok - assert cache.put_new(term, term) - refute cache.get(term) + assert cache.put_new(term, term) == {:ok, true} + assert cache.has_key?(term) == {:ok, false} end end test "put_all", %{cache: cache} do assert cache.put_all(a: 1, b: 2, c: 3) == :ok - refute cache.get(:a) - refute cache.get(:b) - refute cache.get(:c) + assert cache.has_key?(:a) == {:ok, false} + assert cache.has_key?(:b) == {:ok, false} + assert cache.has_key?(:c) == {:ok, false} end - test "get", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.get("foo") - end - - test "get_all", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.get_all("foo") == %{} + test "fetch", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.fetch("foo") end test "delete", %{cache: cache} do - assert cache.put("foo", "bar") == :ok assert cache.delete("foo") == :ok end test "take", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.take("foo") + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.take("foo") end test "has_key?", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.has_key?("foo") + assert cache.has_key?("foo") == {:ok, false} end test "ttl", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - refute cache.ttl("foo") + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.ttl("foo") end test "expire", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.expire("foo", 1000) - refute cache.get("foo") + assert cache.expire("foo", 1000) == {:ok, false} end test "touch", %{cache: cache} do - assert cache.put("foo", "bar") == :ok - assert cache.touch("foo") - refute cache.get("foo") + assert cache.touch("foo") == {:ok, false} end test "incr", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 10) == 10 - assert cache.incr(:counter, -10) == -10 - assert 
cache.incr(:counter, 5, default: 10) == 15 - assert cache.incr(:counter, -5, default: 10) == 5 + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter, 10) == 10 + assert cache.incr!(:counter, -10) == -10 + assert cache.incr!(:counter, 5, default: 10) == 15 + assert cache.incr!(:counter, -5, default: 10) == 5 end test "decr", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter, 10) == -10 - assert cache.decr(:counter, -10) == 10 - assert cache.decr(:counter, 5, default: 10) == 5 - assert cache.decr(:counter, -5, default: 10) == 15 + assert cache.decr!(:counter) == -1 + assert cache.decr!(:counter, 10) == -10 + assert cache.decr!(:counter, -10) == 10 + assert cache.decr!(:counter, 5, default: 10) == 5 + assert cache.decr!(:counter, -5, default: 10) == 15 end end describe "queryable" do - test "all", %{cache: cache} do + test "get_all", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.all() == [] + assert cache.get_all!() == [] end test "stream", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.stream() |> Enum.to_list() == [] + assert cache.stream!() |> Enum.to_list() == [] end test "count_all", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 end test "delete_all", %{cache: cache} do assert cache.put("foo", "bar") == :ok - assert cache.delete_all() == 0 + assert cache.delete_all!() == 0 end end describe "transaction" do test "single transaction", %{cache: cache} do - refute cache.transaction(fn -> + assert cache.transaction(fn -> :ok = cache.put("foo", "bar") - cache.get("foo") - end) + cache.get!("foo") + end) == {:ok, nil} end test "in_transaction?", %{cache: cache} do - refute cache.in_transaction?() + assert cache.in_transaction?() == {:ok, false} cache.transaction(fn -> - :ok = cache.put(1, 11, return: :key) - true = cache.in_transaction?() + :ok = cache.put(1, 11) + + assert cache.in_transaction?() == 
{:ok, true} end) end end @@ -139,22 +126,31 @@ defmodule Nebulex.Adapters.NilTest do assert cache.dump(path) == :ok assert cache.load(path) == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 end end - describe "stats" do - test "stats/0", %{cache: cache} do + describe "info" do + test "ok: returns stats info", %{cache: cache} do assert cache.put("foo", "bar") == :ok - refute cache.get("foo") - assert cache.stats() == %Nebulex.Stats{} + refute cache.get!("foo") + + assert cache.info!(:stats) == %{ + deletions: 0, + evictions: 0, + expirations: 0, + hits: 0, + misses: 0, + updates: 0, + writes: 0 + } end end ## Private Functions defp setup_cache(_config) do - {:ok, pid} = NilCache.start_link() + {:ok, pid} = NilCache.start_link(telemetry: false) on_exit(fn -> :ok = Process.sleep(100) diff --git a/test/nebulex/adapters/partitioned_test.exs b/test/nebulex/adapters/partitioned_test.exs deleted file mode 100644 index dc0bee95..00000000 --- a/test/nebulex/adapters/partitioned_test.exs +++ /dev/null @@ -1,265 +0,0 @@ -defmodule Nebulex.Adapters.PartitionedTest do - use Nebulex.NodeCase - use Nebulex.CacheTest - - import Nebulex.CacheCase - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.TestCache.{Partitioned, PartitionedMock} - - @primary :"primary@127.0.0.1" - @cache_name :partitioned_cache - - # Set config - :ok = Application.put_env(:nebulex, Partitioned, primary: [backend: :shards]) - - setup do - cluster = :lists.usort([@primary | Application.get_env(:nebulex, :nodes, [])]) - - node_pid_list = - start_caches( - [node() | Node.list()], - [ - {Partitioned, [name: @cache_name, join_timeout: 2000]}, - {PartitionedMock, []} - ] - ) - - default_dynamic_cache = Partitioned.get_dynamic_cache() - _ = Partitioned.put_dynamic_cache(@cache_name) - - on_exit(fn -> - _ = Partitioned.put_dynamic_cache(default_dynamic_cache) - :ok = Process.sleep(100) - stop_caches(node_pid_list) - end) - - {:ok, cache: Partitioned, name: @cache_name, cluster: 
cluster} - end - - describe "c:init/1" do - test "initializes the primary store metadata" do - Adapter.with_meta(PartitionedCache.Primary, fn adapter, meta -> - assert adapter == Nebulex.Adapters.Local - assert meta.backend == :shards - end) - end - - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Invalid - end - end - end - - test "fails because unloaded keyslot module" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :unloaded_keyslot, - keyslot: UnloadedKeyslot - ) - - assert Regex.match?(~r"keyslot UnloadedKeyslot was not compiled", msg) - end - - test "fails because keyslot module does not implement expected behaviour" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: __MODULE__ - ) - - mod = inspect(__MODULE__) - behaviour = "Nebulex.Adapter.Keyslot" - assert Regex.match?(~r"expected #{mod} to implement the behaviour #{behaviour}", msg) - end - - test "fails because invalid keyslot option" do - assert {:error, {%ArgumentError{message: msg}, _}} = - Partitioned.start_link( - name: :invalid_keyslot, - keyslot: "invalid" - ) - - assert Regex.match?(~r"expected keyslot: to be an atom, got: \"invalid\"", msg) - end - end - - describe "partitioned cache" do - test "custom keyslot" do - defmodule Keyslot do - @behaviour Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> rem(range) - end - end - - test_with_dynamic_cache(Partitioned, [name: :custom_keyslot, keyslot: Keyslot], fn -> - refute Partitioned.get("foo") - assert Partitioned.put("foo", "bar") == :ok - assert Partitioned.get("foo") == "bar" - end) - end - - test "get_and_update" do - assert Partitioned.get_and_update(1, 
&Partitioned.get_and_update_fun/1) == {nil, 1} - assert Partitioned.get_and_update(1, &Partitioned.get_and_update_fun/1) == {1, 2} - assert Partitioned.get_and_update(1, &Partitioned.get_and_update_fun/1) == {2, 4} - - assert_raise ArgumentError, fn -> - Partitioned.get_and_update(1, &Partitioned.get_and_update_bad_fun/1) - end - end - - test "incr raises when the counter is not an integer" do - :ok = Partitioned.put(:counter, "string") - - assert_raise ArgumentError, fn -> - Partitioned.incr(:counter, 10) - end - end - end - - describe "cluster scenario:" do - test "node leaves and then rejoins", %{name: name, cluster: cluster} do - assert node() == @primary - assert :lists.usort(Node.list()) == cluster -- [node()] - assert Partitioned.nodes() == cluster - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.leave_cluster() - assert Partitioned.nodes() == cluster -- [node()] - end) - - Partitioned.with_dynamic_cache(name, fn -> - :ok = Partitioned.join_cluster() - assert Partitioned.nodes() == cluster - end) - end - - test "teardown cache node", %{cluster: cluster} do - assert Partitioned.nodes() == cluster - - assert Partitioned.put(1, 1) == :ok - assert Partitioned.get(1) == 1 - - node = teardown_cache(1) - - wait_until(fn -> - assert Partitioned.nodes() == cluster -- [node] - end) - - refute Partitioned.get(1) - - assert :ok == Partitioned.put_all([{4, 44}, {2, 2}, {1, 1}]) - - assert Partitioned.get(4) == 44 - assert Partitioned.get(2) == 2 - assert Partitioned.get(1) == 1 - end - - test "bootstrap leaves cache from the cluster when terminated and then rejoins when restarted", - %{name: name} do - prefix = [:nebulex, :test_cache, :partitioned, :bootstrap] - started = prefix ++ [:started] - stopped = prefix ++ [:stopped] - joined = prefix ++ [:joined] - exit_sig = prefix ++ [:exit] - - with_telemetry_handler(__MODULE__, [started, stopped, joined, exit_sig], fn -> - assert node() in Partitioned.nodes() - - true = - [name, Bootstrap] - |> 
normalize_module_name() - |> Process.whereis() - |> Process.exit(:stop) - - assert_receive {^exit_sig, %{system_time: _}, %{reason: :stop}}, 5000 - assert_receive {^stopped, %{system_time: _}, %{reason: :stop, cluster_nodes: nodes}}, 5000 - - refute node() in nodes - - assert_receive {^started, %{system_time: _}, %{}}, 5000 - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - - assert node() in nodes - assert nodes -- Partitioned.nodes() == [] - - :ok = Process.sleep(2100) - - assert_receive {^joined, %{system_time: _}, %{cluster_nodes: nodes}}, 5000 - assert node() in nodes - end) - end - end - - describe "rpc" do - test "timeout error" do - assert Partitioned.put_all(for(x <- 1..100_000, do: {x, x}), timeout: 60_000) == :ok - assert Partitioned.get(1, timeout: 1000) == 1 - - msg = ~r"RPC error while executing action :all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - Partitioned.all(nil, timeout: 1) - end - end - - test "runtime error" do - _ = Process.flag(:trap_exit, true) - - assert [1, 2] |> PartitionedMock.get_all(timeout: 10) |> map_size() == 0 - - assert PartitionedMock.put_all(a: 1, b: 2) == :ok - - assert [1, 2] |> PartitionedMock.get_all() |> map_size() == 0 - - assert_raise ArgumentError, fn -> - PartitionedMock.get(1) - end - - msg = ~r"RPC error while executing action :count_all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - PartitionedMock.count_all() - end - end - end - - if Code.ensure_loaded?(:erpc) do - describe ":erpc" do - test "timeout error" do - assert Partitioned.put(1, 1) == :ok - assert Partitioned.get(1, timeout: 1000) == 1 - - node = "#{inspect(Partitioned.get_node(1))}" - reason = "#{inspect({:erpc, :timeout})}" - - msg = ~r"The RPC operation failed on node #{node} with reason:\n\n#{reason}" - - assert_raise Nebulex.RPCError, msg, fn -> - Partitioned.get(1, timeout: 0) - end - end - end - end - - ## Private Functions - - defp 
teardown_cache(key) do - node = Partitioned.get_node(key) - remote_pid = :rpc.call(node, Process, :whereis, [@cache_name]) - :ok = :rpc.call(node, Supervisor, :stop, [remote_pid]) - node - end -end diff --git a/test/nebulex/adapters/replicated_test.exs b/test/nebulex/adapters/replicated_test.exs deleted file mode 100644 index c0629c52..00000000 --- a/test/nebulex/adapters/replicated_test.exs +++ /dev/null @@ -1,347 +0,0 @@ -defmodule Nebulex.Adapters.ReplicatedTest do - use Nebulex.NodeCase - use Nebulex.CacheTest - - import Mock - import Nebulex.CacheCase - - alias Nebulex.TestCache.{Replicated, ReplicatedMock} - - @cache_name :replicated_cache - - setup_all do - node_pid_list = start_caches(cluster_nodes(), [{Replicated, [name: @cache_name]}]) - - on_exit(fn -> - :ok = Process.sleep(100) - stop_caches(node_pid_list) - end) - - {:ok, cache: Replicated, name: @cache_name} - end - - setup do - default_dynamic_cache = Replicated.get_dynamic_cache() - _ = Replicated.put_dynamic_cache(@cache_name) - - _ = Replicated.delete_all() - - on_exit(fn -> - Replicated.put_dynamic_cache(default_dynamic_cache) - end) - - :ok - end - - describe "c:init/1" do - test "raises an exception because invalid primary store" do - assert_raise ArgumentError, ~r"adapter Invalid was not compiled", fn -> - defmodule Demo do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Invalid - end - end - end - end - - describe "replicated cache:" do - test "put/3" do - assert Replicated.put(1, 1) == :ok - assert Replicated.get(1) == 1 - - assert_for_all_replicas(Replicated, :get, [1], 1) - - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - end - - test "delete/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get("foo") == "bar" - - assert_for_all_replicas(Replicated, :get, ["foo"], "bar") - - assert Replicated.delete("foo") == :ok - 
refute Replicated.get("foo") - - assert_for_all_replicas(Replicated, :get, ["foo"], nil) - end - - test "take/2" do - assert Replicated.put("foo", "bar") == :ok - assert Replicated.get("foo") == "bar" - - assert_for_all_replicas(Replicated, :get, ["foo"], "bar") - - assert Replicated.take("foo") == "bar" - refute Replicated.get("foo") - - assert_for_all_replicas(Replicated, :take, ["foo"], nil) - end - - test "incr/3" do - assert Replicated.incr(:counter, 3) == 3 - assert Replicated.incr(:counter) == 4 - - assert_for_all_replicas(Replicated, :get, [:counter], 4) - end - - test "incr/3 raises when the counter is not an integer" do - :ok = Replicated.put(:counter, "string") - - assert_raise ArgumentError, fn -> - Replicated.incr(:counter, 10) - end - end - - test "delete_all/2" do - assert Replicated.put_all(a: 1, b: 2, c: 3) == :ok - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{a: 1, b: 2, c: 3}) - - assert Replicated.delete_all() == 3 - assert Replicated.count_all() == 0 - - assert_for_all_replicas(Replicated, :get_all, [[:a, :b, :c]], %{}) - end - end - - describe "cluster" do - test "node leaves and then rejoins", %{name: name} do - cluster = :lists.usort(cluster_nodes()) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.leave_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster -- [node()] - end) - - Replicated.with_dynamic_cache(name, fn -> - :ok = Replicated.join_cluster() - end) - - wait_until(fn -> - assert Replicated.nodes() == cluster - end) - end - - test "error: rpc error" do - node_pid_list = start_caches(cluster_nodes(), [{ReplicatedMock, []}]) - - try do - _ = Process.flag(:trap_exit, true) - - msg = ~r"RPC error while executing action :put_all\n\nSuccessful responses:" - - assert_raise Nebulex.RPCMultiCallError, msg, fn -> - ReplicatedMock.put_all(a: 1, b: 2) - end - after - stop_caches(node_pid_list) - end - end - - test 
"ok: start/stop cache nodes" do - event = [:nebulex, :test_cache, :replicated, :replication] - - with_telemetry_handler(__MODULE__, [event], fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(cluster_nodes()) - - assert Replicated.put_all(a: 1, b: 2) == :ok - assert Replicated.put(:c, 3, ttl: 5000) == :ok - - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - # start new cache nodes - nodes = [:"node3@127.0.0.1", :"node4@127.0.0.1"] - node_pid_list = start_caches(nodes, [{Replicated, [name: @cache_name]}]) - - wait_until(fn -> - assert Replicated.nodes() |> :lists.usort() == :lists.usort(nodes ++ cluster_nodes()) - end) - - wait_until(10, 1000, fn -> - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - end) - - # stop cache node - :ok = node_pid_list |> hd() |> List.wrap() |> stop_caches() - - if Code.ensure_loaded?(:pg) do - # errors on failed nodes should be ignored - with_mock Nebulex.Cache.Cluster, [:passthrough], - get_nodes: fn _ -> [:"node5@127.0.0.1"] ++ nodes end do - assert Replicated.put(:foo, :bar) == :ok - - assert_receive {^event, %{rpc_errors: 2}, meta} - assert meta[:adapter_meta][:cache] == Replicated - assert meta[:adapter_meta][:name] == :replicated_cache - assert meta[:function_name] == :put - - assert [ - "node5@127.0.0.1": :noconnection, - "node3@127.0.0.1": %Nebulex.RegistryLookupError{} - ] = meta[:rpc_errors] - end - end - - wait_until(10, 1000, fn -> - assert Replicated.nodes() |> :lists.usort() == - :lists.usort(cluster_nodes() ++ [:"node4@127.0.0.1"]) - end) - - assert_for_all_replicas( - Replicated, - :get_all, - [[:a, :b, :c]], - %{a: 1, b: 2, c: 3} - ) - - :ok = stop_caches(node_pid_list) - end) - end - end - - describe "write-like operations locked" do - test "when a delete_all command is ongoing" do - test_with_dynamic_cache(ReplicatedMock, [name: :replicated_global_mock], fn -> - true = Process.register(self(), __MODULE__) 
- _ = Process.flag(:trap_exit, true) - - task1 = - Task.async(fn -> - _ = ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - _ = ReplicatedMock.delete_all() - send(__MODULE__, :delete_all) - end) - - task2 = - Task.async(fn -> - :ok = Process.sleep(1000) - _ = ReplicatedMock.put_dynamic_cache(:replicated_global_mock) - :ok = ReplicatedMock.put("foo", "bar") - :ok = Process.sleep(100) - send(__MODULE__, :put) - end) - - assert_receive :delete_all, 5000 - assert_receive :put, 5000 - - [_, _] = Task.yield_many([task1, task2]) - end) - end - end - - describe "doesn't leave behind EXIT messages after calling, with exits trapped:" do - test "all/0" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.all() - refute_receive {:EXIT, _, :normal} - end - - test "delete/1" do - put_all_and_trap_exits(a: 1) - Replicated.delete(:a) - refute_receive {:EXIT, _, :normal} - end - - test "delete_all/2" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.delete_all() - refute_receive {:EXIT, _, :normal} - end - - test "get/1" do - put_all_and_trap_exits(a: 1) - Replicated.get(:a) - refute_receive {:EXIT, _, :normal} - end - - test "incr/1" do - put_all_and_trap_exits(a: 1) - Replicated.incr(:a) - refute_receive {:EXIT, _, :normal} - end - - test "nodes/0" do - put_all_and_trap_exits([]) - Replicated.nodes() - refute_receive {:EXIT, _, :normal} - end - - test "put/2" do - put_all_and_trap_exits([]) - Replicated.put(:a, 1) - refute_receive {:EXIT, _, :normal} - end - - test "put_all/1" do - put_all_and_trap_exits([]) - Replicated.put_all(a: 1, b: 2, c: 3) - refute_receive {:EXIT, _, :normal} - end - - test "count_all/2" do - put_all_and_trap_exits([]) - Replicated.count_all() - refute_receive {:EXIT, _, :normal} - end - - test "stream/0" do - put_all_and_trap_exits(a: 1, b: 2, c: 3) - Replicated.stream() |> Enum.take(10) - refute_receive {:EXIT, _, :normal} - end - - test "take/1" do - put_all_and_trap_exits(a: 1) - Replicated.take(:a) - refute_receive {:EXIT, _, 
:normal} - end - - # Put the values, ensure we didn't generate a message before trapping exits, - # then trap exits. - defp put_all_and_trap_exits(kv_pairs) do - Replicated.put_all(kv_pairs, ttl: :infinity) - refute_receive {:EXIT, _, :normal} - Process.flag(:trap_exit, true) - end - end - - ## Helpers - - defp assert_for_all_replicas(cache, action, args, expected) do - assert {res_lst, []} = - :rpc.multicall( - cache.nodes(), - cache, - :with_dynamic_cache, - [@cache_name, cache, action, args] - ) - - Enum.each(res_lst, fn res -> assert res == expected end) - end - - defp cluster_nodes do - [node() | Node.list()] -- [:"node3@127.0.0.1", :"node4@127.0.0.1"] - end -end diff --git a/test/nebulex/adapters/stats_test.exs b/test/nebulex/adapters/stats_test.exs deleted file mode 100644 index 3e5b67ca..00000000 --- a/test/nebulex/adapters/stats_test.exs +++ /dev/null @@ -1,367 +0,0 @@ -defmodule Nebulex.Adapters.StatsTest do - use ExUnit.Case - - import Nebulex.CacheCase - - alias Nebulex.Cache.Stats - - ## Shared cache - - defmodule Cache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - - defmodule L4 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - end - - ## Shared constants - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1), backend: :shards}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] - - @event [:nebulex, :adapters, :stats_test, :cache, :stats] - - ## Tests - - describe "(multilevel) stats/0" do - setup_with_cache(Cache, [stats: true] ++ @config) - - test "hits and 
misses" do - :ok = Cache.put_all(a: 1, b: 2) - - assert Cache.get(:a) == 1 - assert Cache.has_key?(:a) - assert Cache.ttl(:b) == :infinity - refute Cache.get(:c) - refute Cache.get(:d) - - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert_stats_measurements(Cache, - l1: [hits: 5, misses: 4, writes: 2], - l2: [hits: 0, misses: 4, writes: 2], - l3: [hits: 0, misses: 4, writes: 2] - ) - end - - test "writes and updates" do - assert Cache.put_all(a: 1, b: 2) == :ok - assert Cache.put_all(%{a: 1, b: 2}) == :ok - refute Cache.put_new_all(a: 1, b: 2) - assert Cache.put_new_all(c: 3, d: 4, e: 3) - assert Cache.put(1, 1) == :ok - refute Cache.put_new(1, 2) - refute Cache.replace(2, 2) - assert Cache.put_new(2, 2) - assert Cache.replace(2, 22) - assert Cache.incr(:counter) == 1 - assert Cache.incr(:counter) == 2 - refute Cache.expire(:f, 1000) - assert Cache.expire(:a, 1000) - refute Cache.touch(:f) - assert Cache.touch(:b) - - :ok = Process.sleep(1100) - refute Cache.get(:a) - - wait_until(fn -> - assert_stats_measurements(Cache, - l1: [expirations: 1, misses: 1, writes: 10, updates: 4], - l2: [expirations: 1, misses: 1, writes: 10, updates: 4], - l3: [expirations: 1, misses: 1, writes: 10, updates: 4] - ) - end) - end - - test "evictions" do - entries = for x <- 1..10, do: {x, x} - :ok = Cache.put_all(entries) - - assert Cache.delete(1) == :ok - assert Cache.take(2) == 2 - refute Cache.take(20) - - assert_stats_measurements(Cache, - l1: [evictions: 2, misses: 1, writes: 10], - l2: [evictions: 2, misses: 1, writes: 10], - l3: [evictions: 2, misses: 1, writes: 10] - ) - - assert Cache.delete_all() == 24 - - assert_stats_measurements(Cache, - l1: [evictions: 10, misses: 1, writes: 10], - l2: [evictions: 10, misses: 1, writes: 10], - l3: [evictions: 10, misses: 1, writes: 10] - ) - end - - test "expirations" do - :ok = Cache.put_all(a: 1, b: 2) - :ok = Cache.put_all([c: 3, d: 4], ttl: 1000) - - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2, c: 3, d: 
4} - - :ok = Process.sleep(1100) - assert Cache.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - wait_until(fn -> - assert_stats_measurements(Cache, - l1: [evictions: 2, expirations: 2, hits: 6, misses: 2, writes: 4], - l2: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4], - l3: [evictions: 2, expirations: 2, hits: 0, misses: 2, writes: 4] - ) - end) - end - end - - describe "(replicated) stats/0" do - alias Cache.L2, as: Replicated - - setup_with_cache(Replicated, [stats: true] ++ @config) - - test "hits and misses" do - :ok = Replicated.put_all(a: 1, b: 2) - - assert Replicated.get(:a) == 1 - assert Replicated.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Replicated.stats() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "(partitioned) stats/0" do - alias Cache.L3, as: Partitioned - - setup_with_cache(Partitioned, [stats: true] ++ @config) - - test "hits and misses" do - :ok = Partitioned.put_all(a: 1, b: 2) - - assert Partitioned.get(:a) == 1 - assert Partitioned.get_all([:a, :b, :c, :d]) == %{a: 1, b: 2} - - assert %Nebulex.Stats{measurements: measurements} = Partitioned.stats() - assert measurements.hits == 3 - assert measurements.misses == 2 - end - end - - describe "disabled stats in a cache level" do - setup_with_cache( - Cache, - [stats: true] ++ - Keyword.update!( - @config, - :levels, - &(&1 ++ [{Cache.L4, gc_interval: :timer.hours(1), stats: false}]) - ) - ) - - test "ignored when returning stats" do - measurements = Cache.stats().measurements - assert Map.get(measurements, :l1) - assert Map.get(measurements, :l2) - assert Map.get(measurements, :l3) - refute Map.get(measurements, :l4) - end - end - - describe "cache init error" do - test "because invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {%ArgumentError{message: msg}, _}} = - Cache.start_link(stats: 123, levels: [{Cache.L1, []}]) - - assert msg == "expected stats: 
to be boolean, got: 123" - end - - test "L1: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L1, {error, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, [stats: 123]}]) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - - test "L2: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L2, {error, _}}}}}} = - Cache.start_link(stats: true, levels: [{Cache.L1, []}, {Cache.L2, [stats: 123]}]) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - - test "L3: invalid stats option" do - _ = Process.flag(:trap_exit, true) - - {:error, {:shutdown, {_, _, {:shutdown, {_, Cache.L3, {error, _}}}}}} = - Cache.start_link( - stats: true, - levels: [{Cache.L1, []}, {Cache.L2, []}, {Cache.L3, [stats: 123]}] - ) - - assert error == %ArgumentError{message: "expected stats: to be boolean, got: 123"} - end - end - - describe "new generation" do - alias Cache.L1 - alias Cache.L2.Primary, as: L2Primary - alias Cache.L3.Primary, as: L3Primary - - setup_with_cache(Cache, [stats: true] ++ @config) - - test "updates evictions" do - :ok = Cache.put_all(a: 1, b: 2, c: 3) - assert Cache.count_all() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all() == 9 - - assert_stats_measurements(Cache, - l1: [evictions: 0, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L1.new_generation() - assert Cache.count_all() == 6 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 0, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L2Primary.new_generation() - _ = L2Primary.new_generation() - assert Cache.count_all() == 3 - - assert_stats_measurements(Cache, - l1: 
[evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 0, writes: 3] - ) - - _ = L3Primary.new_generation() - _ = L3Primary.new_generation() - assert Cache.count_all() == 0 - - assert_stats_measurements(Cache, - l1: [evictions: 3, writes: 3], - l2: [evictions: 3, writes: 3], - l3: [evictions: 3, writes: 3] - ) - end - end - - describe "disabled stats:" do - setup_with_cache(Cache, @config) - - test "stats/0 returns nil" do - refute Cache.stats() - end - - test "dispatch_stats/1 is skipped" do - with_telemetry_handler(__MODULE__, [@event], fn -> - :ok = Cache.dispatch_stats() - - refute_receive {@event, _, %{cache: Nebulex.Cache.StatsTest.Cache}} - end) - end - end - - describe "dispatch_stats/1" do - setup_with_cache(Cache, [stats: true] ++ @config) - - test "emits a telemetry event when called" do - with_telemetry_handler(__MODULE__, [@event], fn -> - :ok = Cache.dispatch_stats(metadata: %{node: node()}) - node = node() - - assert_receive {@event, measurements, - %{cache: Nebulex.Adapters.StatsTest.Cache, node: ^node}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0} - } - end) - end - end - - describe "dispatch_stats/1 with dynamic cache" do - setup_with_dynamic_cache( - Cache, - :stats_with_dispatch, - [telemetry_prefix: [:my_event], stats: true] ++ @config - ) - - test "emits a telemetry event with custom telemetry_prefix when called" do - with_telemetry_handler(__MODULE__, [[:my_event, :stats]], fn -> - :ok = Cache.dispatch_stats(metadata: %{foo: :bar}) - - assert_receive {[:my_event, :stats], measurements, - %{cache: :stats_with_dispatch, foo: :bar}} - - assert measurements == %{ - l1: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0}, - l2: %{hits: 0, misses: 0, writes: 0, evictions: 0, 
expirations: 0, updates: 0}, - l3: %{hits: 0, misses: 0, writes: 0, evictions: 0, expirations: 0, updates: 0} - } - end) - end - end - - ## Helpers - - defp assert_stats_measurements(cache, levels) do - measurements = cache.stats().measurements - - for {level, stats} <- levels, {stat, expected} <- stats do - assert get_in(measurements, [level, stat]) == expected - end - end -end diff --git a/test/nebulex/cache/info_stats_test.exs b/test/nebulex/cache/info_stats_test.exs new file mode 100644 index 00000000..5e02ee5b --- /dev/null +++ b/test/nebulex/cache/info_stats_test.exs @@ -0,0 +1,128 @@ +defmodule Nebulex.Cache.InfoStatsTest do + use ExUnit.Case, asyc: true + use Mimic + + import Nebulex.CacheCase + + alias Nebulex.Adapters.Common.Info.Stats + + ## Internals + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + end + + ## Tests + + describe "c:Nebulex.Cache.stats/1" do + setup_with_cache Cache, stats: true + + test "returns an error" do + Nebulex.Cache.Registry + |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + assert Cache.info() == {:error, %Nebulex.Error{module: Nebulex.Error, reason: :error}} + end + + test "hits and misses" do + :ok = Cache.put_all!(a: 1, b: 2) + + assert Cache.get!(:a) == 1 + assert Cache.has_key?(:a) + assert Cache.ttl!(:b) == :infinity + refute Cache.get!(:c) + refute Cache.get!(:d) + + assert Cache.get_all!({:in, [:a, :b, :c, :d]}) |> Map.new() == %{a: 1, b: 2} + + assert Cache.info!(:stats) == %{ + hits: 5, + misses: 4, + writes: 2, + evictions: 0, + expirations: 0, + deletions: 0, + updates: 0 + } + end + + test "writes and updates" do + assert Cache.put_all!(a: 1, b: 2) == :ok + assert Cache.put_all(%{a: 1, b: 2}) == :ok + refute Cache.put_new_all!(a: 1, b: 2) + assert Cache.put_new_all!(c: 3, d: 4, e: 3) + assert Cache.put!(1, 1) == :ok + refute Cache.put_new!(1, 2) + refute Cache.replace!(2, 2) + assert Cache.put_new!(2, 2) + assert 
Cache.replace!(2, 22) + assert Cache.incr!(:counter) == 1 + assert Cache.incr!(:counter) == 2 + refute Cache.expire!(:f, 1000) + assert Cache.expire!(:a, 1000) + refute Cache.touch!(:f) + assert Cache.touch!(:b) + + :ok = Process.sleep(1100) + + refute Cache.get!(:a) + + wait_until(fn -> + assert Cache.info!(:stats) == %{ + hits: 0, + misses: 1, + writes: 10, + evictions: 1, + expirations: 1, + deletions: 1, + updates: 4 + } + end) + end + + test "deletions" do + entries = for x <- 1..10, do: {x, x} + :ok = Cache.put_all!(entries) + + assert Cache.delete!(1) == :ok + assert Cache.take!(2) == 2 + + assert_raise Nebulex.KeyError, fn -> + Cache.take!(20) + end + + assert Cache.info!(:stats) == %{ + hits: 1, + misses: 1, + writes: 10, + evictions: 0, + expirations: 0, + deletions: 2, + updates: 0 + } + + assert Cache.delete_all!() == 8 + + assert Cache.info!(:stats) == %{ + hits: 1, + misses: 1, + writes: 10, + evictions: 0, + expirations: 0, + deletions: 10, + updates: 0 + } + end + end + + describe "disabled stats:" do + setup_with_cache Cache, stats: false + + test "c:Nebulex.Cache.stats/1 returns empty stats when counter is not set" do + assert Cache.info!(:stats) == Stats.new() + end + end +end diff --git a/test/nebulex/cache/info_test.exs b/test/nebulex/cache/info_test.exs new file mode 100644 index 00000000..7782ddb1 --- /dev/null +++ b/test/nebulex/cache/info_test.exs @@ -0,0 +1,89 @@ +defmodule Nebulex.Cache.InfoTest do + use ExUnit.Case, asyc: true + + import Nebulex.CacheCase + + alias Nebulex.Adapter + alias Nebulex.Adapters.Common.Info.Stats + + ## Internals + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + end + + @empty_stats Stats.new() + + @empty_mem %{ + allocated_memory: 1_000_000, + used_memory: 0 + } + + ## Tests + + describe "c:Nebulex.Cache.info/1" do + setup_with_cache Cache, stats: true + + test "ok: returns all info" do + assert info = Cache.info!() + assert Cache.info!(:all) 
== info + + assert info[:server] == server_info() + assert info[:memory] == @empty_mem + assert info[:stats] == @empty_stats + end + + test "ok: returns item's info" do + assert Cache.info!(:server) == server_info() + assert Cache.info!(:memory) == @empty_mem + assert Cache.info!(:stats) == @empty_stats + end + + test "ok: returns multiple items info" do + assert Cache.info!([:server]) == %{server: server_info()} + assert Cache.info!([:memory]) == %{memory: @empty_mem} + assert Cache.info!([:server, :memory]) == %{server: server_info(), memory: @empty_mem} + end + + test "ok: raises an exception because the requested item doesn't exist" do + for spec <- [:unknown, [:memory, :unknown], [:unknown, :unknown]] do + assert_raise ArgumentError, ~r"invalid information specification key :unknown", fn -> + Cache.info!(spec) + end + end + end + end + + describe "c:Nebulex.Cache.info/1 (stats disabled)" do + setup_with_cache Cache, stats: false + + test "ok: returns all info" do + assert info = Cache.info!() + + assert info[:server] == server_info() + assert info[:memory] == @empty_mem + assert info[:stats] == @empty_stats + end + end + + ## Provate functions + + defp server_info do + {:ok, adapter_meta} = Adapter.lookup_meta(Cache) + + %{ + nbx_version: nbx_vsn(), + cache_module: adapter_meta[:cache], + cache_adapter: adapter_meta[:adapter], + cache_name: adapter_meta[:name], + cache_pid: adapter_meta[:pid] + } + end + + defp nbx_vsn do + Application.spec(:nebulex, :vsn) |> to_string() + end +end diff --git a/test/nebulex/cache/registry_test.exs b/test/nebulex/cache/registry_test.exs new file mode 100644 index 00000000..f681a469 --- /dev/null +++ b/test/nebulex/cache/registry_test.exs @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.RegistryTest do + use ExUnit.Case, async: true + + import Nebulex.CacheCase, only: [test_with_dynamic_cache: 3] + + alias Nebulex.TestCache.Cache + + describe "lookup/1" do + test "error: returns an error with reason ':registry_lookup_error'" do + 
assert Nebulex.Cache.Registry.lookup(self()) == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + reason: :registry_lookup_error, + opts: [cache: self()] + }} + end + end + + describe "all_running/0" do + test "ok: returns all running cache names" do + test_with_dynamic_cache(Cache, [name: :registry_test_cache], fn -> + assert :registry_test_cache in Nebulex.Cache.Registry.all_running() + end) + end + + test "ok: returns all running cache pids" do + test_with_dynamic_cache(Cache, [name: nil], fn -> + assert Nebulex.Cache.Registry.all_running() |> Enum.any?(&is_pid/1) + end) + end + end +end diff --git a/test/nebulex/cache/supervisor_test.exs b/test/nebulex/cache/supervisor_test.exs index 2de67b53..b628907e 100644 --- a/test/nebulex/cache/supervisor_test.exs +++ b/test/nebulex/cache/supervisor_test.exs @@ -4,7 +4,7 @@ defmodule Nebulex.Cache.SupervisorTest do defmodule MyCache do use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter @impl true def init(opts) do @@ -19,72 +19,103 @@ defmodule Nebulex.Cache.SupervisorTest do alias Nebulex.TestCache.Cache - test "fails on init because :ignore is returned" do - assert MyCache.start_link(ignore: true) == :ignore - end + describe "compile_config/1" do + test "error: missing :otp_app option" do + assert_raise ArgumentError, ~r"required :otp_app option not found", fn -> + Nebulex.Cache.Supervisor.compile_config(adapter: TestAdapter) + end + end - test "fails on compile_config because missing otp_app" do - assert_raise ArgumentError, "expected otp_app: to be given as argument", fn -> - Nebulex.Cache.Supervisor.compile_config(adapter: TestAdapter) + test "error: missing :adapter option" do + assert_raise ArgumentError, ~r"required :adapter option not found", fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) + end end - end - test "fails on compile_config because missing adapter" do - assert_raise ArgumentError, "expected adapter: to be given as argument", 
fn -> - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) + test "error: adapter was not compiled" do + msg = ~r"invalid value for :adapter option: adapter TestAdapter was not compiled" + + assert_raise ArgumentError, msg, fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: TestAdapter) + end end - end - test "fails on compile_config because adapter was not compiled" do - msg = ~r"adapter TestAdapter was not compiled, ensure" + test "error: adapter doesn't implement the required behaviour" do + msg = + "invalid value for :adapter option: expected the adapter module " <> + "given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" + + assert_raise ArgumentError, msg, fn -> + defmodule MyAdapter do + end - assert_raise ArgumentError, msg, fn -> - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: TestAdapter) + defmodule MyCache2 do + use Nebulex.Cache, + otp_app: :nebulex, + adapter: MyAdapter + end + end + end + + test "error: invalid valurr for :adapter option" do + assert_raise ArgumentError, ~r"invalid value for :adapter option: expected a module", fn -> + Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex, adapter: 123) + end end end - test "fails on compile_config because adapter error" do - msg = "expected :adapter option given to Nebulex.Cache to list Nebulex.Adapter as a behaviour" + describe "start_link/1" do + test "starts anonymous cache" do + assert {:ok, pid} = Cache.start_link(name: nil) + assert Process.alive?(pid) - assert_raise ArgumentError, msg, fn -> - defmodule MyAdapter do - end + assert Cache.stop(pid, []) == :ok + refute Process.alive?(pid) + end + + test "starts cache with via" do + {:ok, _} = Registry.start_link(keys: :unique, name: Registry.ViaTest) + name = {:via, Registry, {Registry.ViaTest, "test"}} + + assert {:ok, pid} = Cache.start_link(name: name) + assert Process.alive?(pid) + + assert [{^pid, _}] = Registry.lookup(Registry.ViaTest, "test") - defmodule MyCache2 do + 
assert Cache.stop(pid, []) == :ok + refute Process.alive?(pid) + end + + test "starts cache with custom adapter" do + defmodule CustomCache do use Nebulex.Cache, otp_app: :nebulex, - adapter: MyAdapter + adapter: Nebulex.TestCache.AdapterMock end - Nebulex.Cache.Supervisor.compile_config(otp_app: :nebulex) - end - end + assert {:ok, _pid} = CustomCache.start_link(child_name: :custom_cache) - test "start cache with custom adapter" do - defmodule CustomCache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.TestCache.AdapterMock - end + _ = Process.flag(:trap_exit, true) - assert {:ok, _pid} = CustomCache.start_link(child_name: :custom_cache) - _ = Process.flag(:trap_exit, true) + assert {:error, _error} = + CustomCache.start_link(name: :another_custom_cache, child_name: :custom_cache) - assert {:error, error} = - CustomCache.start_link(name: :another_custom_cache, child_name: :custom_cache) + assert CustomCache.stop() == :ok + end - assert_receive {:EXIT, _pid, ^error} - assert CustomCache.stop() == :ok - end + test "emits telemetry event upon cache start" do + with_telemetry_handler([[:nebulex, :cache, :init]], fn -> + {:ok, _} = Cache.start_link(name: :telemetry_test) - test "emits telemetry event upon cache start" do - with_telemetry_handler([[:nebulex, :cache, :init]], fn -> - {:ok, _} = Cache.start_link(name: :telemetry_test) + assert_receive {[:nebulex, :cache, :init], _, %{cache: Cache, opts: opts}} + assert opts[:telemetry_prefix] == [:nebulex, :test_cache, :cache] + assert opts[:name] == :telemetry_test + end) + end - assert_receive {[:nebulex, :cache, :init], _, %{cache: Cache, opts: opts}} - assert opts[:telemetry_prefix] == [:nebulex, :test_cache, :cache] - assert opts[:name] == :telemetry_test - end) + test "error: fails on init because :ignore is returned" do + assert MyCache.start_link(ignore: true) == :ignore + end end ## Helpers diff --git a/test/nebulex/cache_error_test.exs b/test/nebulex/cache_error_test.exs new file mode 100644 
index 00000000..1099e5ee --- /dev/null +++ b/test/nebulex/cache_error_test.exs @@ -0,0 +1,23 @@ +defmodule Nebulex.CacheErrorTest do + use ExUnit.Case, async: true + use Mimic + + # Inherit error tests + use Nebulex.Cache.KVErrorTest + use Nebulex.Cache.KVExpirationErrorTest + + setup do + Nebulex.Cache.Registry + |> expect(:lookup, fn _ -> {:ok, %{adapter: Nebulex.FakeAdapter}} end) + + {:ok, cache: Nebulex.TestCache.Cache, name: :test_cache_local_error} + end + + describe "put!/3" do + test "raises an error", %{cache: cache} do + assert_raise RuntimeError, ~r"runtime error", fn -> + cache.put!(:error, %RuntimeError{}) + end + end + end +end diff --git a/test/nebulex/cache_test.exs b/test/nebulex/cache_test.exs new file mode 100644 index 00000000..d80623d2 --- /dev/null +++ b/test/nebulex/cache_test.exs @@ -0,0 +1,122 @@ +defmodule Nebulex.Adapters.CacheTest do + use ExUnit.Case, async: true + + # Cache API test cases + use Nebulex.CacheTestCase + + import Nebulex.CacheCase, only: [setup_with_dynamic_cache: 2] + + setup_with_dynamic_cache Nebulex.TestCache.Cache, :test_cache_local + + describe "KV:" do + test "get_and_update", %{cache: cache} do + fun = fn + nil -> {nil, 1} + val -> {val, val * 2} + end + + assert cache.get_and_update!(1, fun) == {nil, 1} + assert cache.get_and_update!(1, &{&1, &1 * 2}) == {1, 2} + assert cache.get_and_update!(1, &{&1, &1 * 3}) == {2, 6} + assert cache.get_and_update!(1, &{&1, nil}) == {6, 6} + assert cache.get!(1) == 6 + assert cache.get_and_update!(1, fn _ -> :pop end) == {6, nil} + assert cache.get_and_update!(1, fn _ -> :pop end) == {nil, nil} + assert cache.get_and_update!(3, &{&1, 3}) == {nil, 3} + end + + test "get_and_update fails because function returns invalid value", %{cache: cache} do + assert_raise ArgumentError, fn -> + cache.get_and_update(1, fn _ -> :other end) + end + end + + test "get_and_update fails because cache is not started", %{cache: cache} do + :ok = cache.stop() + + assert_raise Nebulex.Error, fn -> + 
assert cache.get_and_update!(1, fn _ -> :pop end) + end + end + + test "incr and update", %{cache: cache} do + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter) == 2 + + assert cache.get_and_update!(:counter, &{&1, &1 * 2}) == {2, 4} + assert cache.incr!(:counter) == 5 + + assert cache.update!(:counter, 1, &(&1 * 2)) == 10 + assert cache.incr!(:counter, -10) == 0 + + assert cache.put("foo", "bar") == :ok + + assert_raise Nebulex.Error, fn -> + cache.incr!("foo") + end + end + + test "incr with ttl", %{cache: cache} do + assert cache.incr!(:counter_with_ttl, 1, ttl: 1000) == 1 + assert cache.incr!(:counter_with_ttl) == 2 + assert cache.fetch!(:counter_with_ttl) == 2 + + :ok = Process.sleep(1010) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + + assert cache.incr!(:counter_with_ttl, 1, ttl: 5000) == 1 + assert {:ok, ttl} = cache.ttl(:counter_with_ttl) + assert ttl > 1000 + + assert cache.expire(:counter_with_ttl, 500) == {:ok, true} + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: :counter_with_ttl}} = cache.fetch(:counter_with_ttl) + end + + test "incr existing entry", %{cache: cache} do + assert cache.put(:counter, 0) == :ok + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter, 2) == 3 + end + end + + describe "queryable:" do + test "error because invalid query", %{cache: cache} do + for action <- [:get_all, :stream] do + assert {:error, %Nebulex.Error{reason: :invalid_query}} = + apply(cache, action, [:invalid]) + end + end + + test "raise exception because invalid query", %{cache: cache} do + for action <- [:get_all!, :stream!] 
do + assert_raise Nebulex.Error, ~r"invalid query", fn -> + apply(cache, action, [:invalid]) + end + end + end + end + + describe "error" do + test "because cache is stopped", %{cache: cache, name: name} do + :ok = cache.stop() + + assert cache.put(1, 13) == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + reason: :registry_lookup_error, + opts: [cache: name] + }} + + msg = ~r"could not lookup Nebulex cache" + + assert_raise Nebulex.Error, msg, fn -> cache.put!(1, 13) end + assert_raise Nebulex.Error, msg, fn -> cache.get!(1) end + assert_raise Nebulex.Error, msg, fn -> cache.delete!(1) end + end + end +end diff --git a/test/nebulex/caching_test.exs b/test/nebulex/caching_test.exs index 45c11bcf..3dbc0420 100644 --- a/test/nebulex/caching_test.exs +++ b/test/nebulex/caching_test.exs @@ -1,61 +1,58 @@ defmodule Nebulex.CachingTest do use ExUnit.Case, async: true - use Nebulex.Caching - - @behaviour Nebulex.Caching.KeyGenerator defmodule Cache do + @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end - defmodule CacheWithDefaultKeyGenerator do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local, - default_key_generator: __MODULE__ - - @behaviour Nebulex.Caching.KeyGenerator + use Nebulex.Caching, cache: Cache - @impl true - def generate(mod, fun, args), do: :erlang.phash2({mod, fun, args}) - end + import Nebulex.CacheCase defmodule YetAnotherCache do + @moduledoc false use Nebulex.Cache, otp_app: :nebulex, - adapter: Nebulex.Adapters.Local + adapter: Nebulex.TestAdapter end defmodule Meta do - defstruct [:id, :count] + @moduledoc false + @type t :: %__MODULE__{} + + defstruct [:id, :count] end - defmodule TestKeyGenerator do - @behaviour Nebulex.Caching.KeyGenerator + ## Tests - @impl true - def generate(_, :put_with_keygen, [arg1, _arg2]) do - arg1 - end + setup_with_cache Cache - def generate(mod, fun, args) do - :erlang.phash2({mod, fun, args}) + describe "caching 
definition" do + test "ok: valid :default_key_generator option" do + defmodule ValidCompileOptsTest do + use Nebulex.Caching, default_key_generator: Nebulex.Caching.SimpleKeyGenerator + end end - end - import Nebulex.CacheCase - - alias Nebulex.CachingTest.{Cache, Meta} + test "error: invalid :default_key_generator option" do + msg = ~r"invalid value for :default_key_generator option: key-generator InvalidKeyGenerator" - setup_with_cache(Cache) + assert_raise ArgumentError, msg, fn -> + defmodule InvalidCompileOptsTest do + use Nebulex.Caching, default_key_generator: InvalidKeyGenerator + end + end + end + end describe "decorator" do test "cacheable fails because missing cache" do - assert_raise ArgumentError, "expected cache: to be given as argument", fn -> - defmodule Test do + assert_raise ArgumentError, ~r"expected :cache option to be found within the decorator", fn -> + defmodule MissingCacheTest do use Nebulex.Caching @decorate cacheable(a: 1) @@ -63,10 +60,12 @@ defmodule Nebulex.CachingTest do {a, b} end end + + MissingCacheTest.t(1, 2) end end - test "cacheable fails invalid option :on_error" do + test "cacheable fails because invalid :on_error option value" do msg = "expected on_error: to be :raise or :nothing, got: :invalid" assert_raise ArgumentError, msg, fn -> @@ -81,7 +80,7 @@ defmodule Nebulex.CachingTest do end end - test "cache_evict fails invalid option :keys" do + test "cache_evict fails because invalid :keys option value" do msg = "expected keys: to be a list with at least one element, got: []" assert_raise ArgumentError, msg, fn -> @@ -99,294 +98,296 @@ defmodule Nebulex.CachingTest do describe "cacheable" do test "with default opts" do - refute Cache.get("x") - assert get_by_x("x") == nil - refute Cache.get("x") + refute Cache.get!("x") + assert get_by_xy("x") == nil + assert Cache.fetch!("x") == nil - assert get_by_x(1, 11) == 11 - assert Cache.get(1) == 11 + assert get_by_xy(1, 11) == 11 + assert Cache.get!(1) == 11 - assert get_by_x(2, 
{:ok, 22}) == {:ok, 22} - assert Cache.get(2) == {:ok, 22} + assert get_by_xy(2, {:ok, 22}) == {:ok, 22} + assert Cache.get!(2) == {:ok, 22} - assert get_by_x(3, :error) == :error - refute Cache.get(3) + assert get_by_xy(3, :error) == :error + refute Cache.get!(3) - assert get_by_x(4, {:error, 4}) == {:error, 4} - refute Cache.get(4) + assert get_by_xy(4, {:error, 4}) == {:error, 4} + refute Cache.get!(4) - refute Cache.get({:xy, 2}) - assert get_by_xy(:xy, 2) == {:xy, 4} - assert Cache.get({:xy, 2}) == {:xy, 4} + refute Cache.get!({:xy, 2}) + assert multiply_xy(:xy, 2) == {:xy, 4} + assert Cache.get!({:xy, 2}) == {:xy, 4} - :ok = Process.sleep(1100) - - refute Cache.get("x") - assert Cache.get(1) == 11 - assert Cache.get(2) == {:ok, 22} - refute Cache.get(3) - refute Cache.get(4) - assert Cache.get({:xy, 2}) == {:xy, 4} + assert Cache.fetch!("x") == nil + assert Cache.get!(1) == 11 + assert Cache.get!(2) == {:ok, 22} + refute Cache.get!(3) + refute Cache.get!(4) + assert Cache.get!({:xy, 2}) == {:xy, 4} end test "with opts" do - refute Cache.get("x") + refute Cache.get!("x") assert get_with_opts(1) == 1 - assert Cache.get(1) == 1 + assert Cache.get!(1) == 1 :ok = Process.sleep(1100) - refute Cache.get(1) + + refute Cache.get!(1) end test "with match function" do - refute Cache.get(:x) + refute Cache.get!(:x) assert get_with_match(:x) == :x - refute Cache.get(:x) + refute Cache.get!(:x) - refute Cache.get(:y) + refute Cache.get!(:y) assert get_with_match(:y) == :y - assert Cache.get(:y) + assert Cache.get!(:y) - refute Cache.get("true") - assert get_with_match_fun("true") == {:ok, "true"} - assert Cache.get("true") == {:ok, "true"} + refute Cache.get!(true) + assert get_with_match_fun(true) == {:ok, "true"} + assert Cache.get!(true) == {:ok, "true"} - refute Cache.get(1) + refute Cache.get!(1) assert get_with_match_fun(1) == {:ok, "1"} - assert Cache.get(1) == "1" + assert Cache.get!(1) == "1" - refute Cache.get({:ok, "hello"}) + refute Cache.get!({:ok, "hello"}) 
assert get_with_match_fun({:ok, "hello"}) == :error - refute Cache.get({:ok, "hello"}) + refute Cache.get!({:ok, "hello"}) + end + + test "with match function and context" do + refute Cache.get!(:x) + assert get_with_match_fun_and_ctx(:x) == {:ok, "x"} + assert_receive %{module: __MODULE__, function_name: :get_with_match_fun_and_ctx, args: [:x]} + assert Cache.get!(:x) == "x" + + refute Cache.get!(true) + assert get_with_match_fun_and_ctx(true) == {:ok, "true"} + assert_receive %{module: __MODULE__, function_name: :get_with_match_fun_and_ctx, args: [true]} + assert Cache.get!(true) == {:ok, "true"} end test "with match function and custom opts" do - refute Cache.get(300) + refute Cache.get!(300) assert get_with_custom_ttl(300) == {:ok, %{ttl: 300}} - assert Cache.get(300) == {:ok, %{ttl: 300}} + assert Cache.get!(300) == {:ok, %{ttl: 300}} :ok = Process.sleep(400) - refute Cache.get(300) + refute Cache.get!(300) end test "with default key" do assert get_with_default_key(123, {:foo, "bar"}) == :ok - assert [123, {:foo, "bar"}] |> :erlang.phash2() |> Cache.get() == :ok + assert [123, {:foo, "bar"}] |> :erlang.phash2() |> Cache.get!() == :ok + assert get_with_default_key(:foo, "bar") == :ok - assert [:foo, "bar"] |> :erlang.phash2() |> Cache.get() == :ok + assert [:foo, "bar"] |> :erlang.phash2() |> Cache.get!() == :ok end test "defining keys using structs and maps" do - refute Cache.get("x") + refute Cache.get!("x") + assert get_meta(%Meta{id: 1, count: 1}) == %Meta{id: 1, count: 1} - assert Cache.get({Meta, 1}) == %Meta{id: 1, count: 1} + assert Cache.get!({Meta, 1}) == %Meta{id: 1, count: 1} + + refute Cache.get!("y") - refute Cache.get("y") assert get_map(%{id: 1}) == %{id: 1} - assert Cache.get(1) == %{id: 1} + assert Cache.get!(1) == %{id: 1} end test "with multiple clauses" do - refute Cache.get(2) + refute Cache.get!(2) + assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 + assert Cache.get!(2) == 4 + + refute Cache.get!("foo") - refute 
Cache.get("foo") assert multiple_clauses("foo", "bar") == {"foo", "bar"} - assert Cache.get("foo") == {"foo", "bar"} + assert Cache.get!("foo") == {"foo", "bar"} end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert get_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" end test "with side effects and returning false (issue #111)" do - refute Cache.get("side-effect") + refute Cache.get!("side-effect") assert get_false_with_side_effect(false) == false - assert Cache.get("side-effect") == 1 + assert Cache.get!("side-effect") == 1 assert get_false_with_side_effect(false) == false - assert Cache.get("side-effect") == 1 + assert Cache.get!("side-effect") == 1 end end describe "cachable with references" do - setup_with_cache(YetAnotherCache) + setup_with_cache YetAnotherCache - test "with referenced key" do + test "returns referenced key" do # Expected values referenced_key = keyref "referenced_id" result = %{id: "referenced_id", name: "referenced_name"} - # Nothing is cached yet - refute Cache.get("referenced_id") - refute Cache.get("referenced_name") - - # First run: the function block is executed and its result is cached under - # the referenced key, and the referenced key is cached under the given key - assert get_with_referenced_key("referenced_name") == result - - # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key - - # Assert the referenced key points to the cached value - assert Cache.get("referenced_id") == result - - # Next run: the value should come from the cache - assert get_with_referenced_key("referenced_name") == result - - # Simulate a cache eviction for the referenced key - :ok = Cache.delete("referenced_id") - - # The value under the referenced key should not longer exist - refute Cache.get("referenced_id") - - # Assert the key still points to the referenced key - assert Cache.get("referenced_name") == referenced_key - - # Next run: the 
key does exist but the referenced key doesn't, then the - # function block is executed and the result is cached under the referenced - # key back again - assert get_with_referenced_key("referenced_name") == result + assert_common_references_flow("referenced_id", referenced_key, result, &get_with_keyref/1) + end - # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + test "returns referenced key by calling function with context" do + # Expected values + key = :erlang.phash2({"referenced_id", ["referenced_name"]}) + referenced_key = keyref key + result = %{id: "referenced_id", name: "referenced_name"} - # Assert the referenced key points to the cached value - assert Cache.get("referenced_id") == result + assert_common_references_flow(key, referenced_key, result, &get_with_keyref_fn_ctx/1) + end - # Similate the referenced key is overridden - :ok = Cache.put("referenced_name", "overridden") + test "returns referenced key by calling referenced cache" do + # Expected values + referenced_key = keyref YetAnotherCache, "referenced_id" + result = %{id: "referenced_id", name: "referenced_name"} - # The referenced key is overridden - assert get_with_referenced_key("referenced_name") == "overridden" + assert_common_references_flow( + YetAnotherCache, + "referenced_id", + referenced_key, + result, + &get_with_keyref_cache/1 + ) end - test "with referenced key from args" do + test "returns referenced key from the args" do # Expected values referenced_key = keyref "id" result = %{attrs: %{id: "id"}, name: "name"} # Nothing is cached yet - refute Cache.get("id") - refute Cache.get("name") + refute Cache.get!("id") + refute Cache.get!("name") # First run: the function block is executed and its result is cached under # the referenced key, and the referenced key is cached under the given key - assert get_with_referenced_key_from_args("name", %{id: "id"}) == result + assert get_with_keyref_from_args("name", %{id: "id"}) == result # 
Assert the key points to the referenced key - assert Cache.get("name") == referenced_key + assert Cache.get!("name") == referenced_key # Assert the referenced key points to the cached value - assert Cache.get("id") == result + assert Cache.get!("id") == result # Next run: the value should come from the cache - assert get_with_referenced_key_from_args("name", %{id: "id"}) == result + assert get_with_keyref_from_args("name", %{id: "id"}) == result end - test "returns fixed referenced" do + test "returns fixed referenced key" do # Expected values referenced_key = keyref "fixed_id" result = %{id: "fixed_id", name: "name"} # Nothing is cached yet - refute Cache.get("fixed_id") - refute Cache.get("name") + refute Cache.get!("fixed_id") + refute Cache.get!("name") # First run: the function block is executed and its result is cached under # the referenced key, and the referenced key is cached under the given key - assert get_with_fixed_referenced_key("name") == result + assert get_with_fixed_keyref("name") == result # Assert the key points to the referenced key - assert Cache.get("name") == referenced_key + assert Cache.get!("name") == referenced_key # Assert the referenced key points to the cached value - assert Cache.get("fixed_id") == result + assert Cache.get!("fixed_id") == result # Next run: the value should come from the cache - assert get_with_fixed_referenced_key("name") == result + assert get_with_fixed_keyref("name") == result end - test "returns referenced key by calling referenced cache" do - # Expected values - referenced_key = keyref YetAnotherCache, "referenced_id" - result = %{id: "referenced_id", name: "referenced_name"} + ## Private functions + + defp assert_common_references_flow(ref_cache \\ nil, key, referenced_key, result, fun) do + # Resolve ref cache if any + ref_cache = ref_cache || Cache # Nothing is cached yet - refute Cache.get("referenced_id") - refute Cache.get("referenced_name") + refute Cache.get!("referenced_id") + refute 
Cache.get!("referenced_name") # First run: the function block is executed and its result is cached under # the referenced key, and the referenced key is cached under the given key - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value - assert YetAnotherCache.get("referenced_id") == result + assert ref_cache.get!(key) == result # Next run: the value should come from the cache - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Simulate a cache eviction for the referenced key - :ok = YetAnotherCache.delete("referenced_id") + :ok = ref_cache.delete!(key) # The value under the referenced key should not longer exist - refute YetAnotherCache.get("referenced_id") + refute ref_cache.get!(key) # Assert the key still points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Next run: the key does exist but the referenced key doesn't, then the # function block is executed and the result is cached under the referenced # key back again - assert get_with_ref_key_with_cache("referenced_name") == result + assert fun.("referenced_name") == result # Assert the key points to the referenced key - assert Cache.get("referenced_name") == referenced_key + assert Cache.get!("referenced_name") == referenced_key # Assert the referenced key points to the cached value - assert YetAnotherCache.get("referenced_id") == result + assert ref_cache.get!(key) == result # Similate the referenced key is overridden - :ok = Cache.put("referenced_name", "overridden") + :ok = Cache.put!("referenced_name", "overridden") # The referenced key is overridden - assert 
get_with_ref_key_with_cache("referenced_name") == "overridden" + assert fun.("referenced_name") == "overridden" + + # Assert the previously referenced key remains the same + assert ref_cache.get!(key) == result end end describe "cache_put" do test "with default opts" do assert update_fun(1) == nil - refute Cache.get(1) + refute Cache.get!(1) assert update_fun(1, :error) == :error - refute Cache.get(1) + refute Cache.get!(1) assert update_fun(1, {:error, :error}) == {:error, :error} - refute Cache.get(1) + refute Cache.get!(1) assert set_keys(x: 1, y: 2, z: 3) == :ok assert update_fun(:x, 2) == 2 assert update_fun(:y, {:ok, 4}) == {:ok, 4} - assert Cache.get(:x) == 2 - assert Cache.get(:y) == {:ok, 4} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == 2 + assert Cache.get!(:y) == {:ok, 4} + assert Cache.get!(:z) == 3 :ok = Process.sleep(1100) - assert Cache.get(:x) == 2 - assert Cache.get(:y) == {:ok, 4} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == 2 + assert Cache.get!(:y) == {:ok, 4} + assert Cache.get!(:z) == 3 end test "with opts" do @@ -395,37 +396,41 @@ defmodule Nebulex.CachingTest do assert update_with_opts(:y) == :y :ok = Process.sleep(1100) - refute Cache.get(:x) - refute Cache.get(:y) + + refute Cache.get!(:x) + refute Cache.get!(:y) end test "with match function" do assert update_with_match(:x) == {:ok, "x"} + assert Cache.get!(:x) == "x" + assert update_with_match(true) == {:ok, "true"} + assert Cache.get!(true) == {:ok, "true"} + assert update_with_match({:z, 1}) == :error - assert Cache.get(:x) == "x" - assert Cache.get(true) == {:ok, "true"} - refute Cache.get({:z, 1}) + refute Cache.get!({:z, 1}) end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert update_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" end test "with multiple keys and ttl" do assert set_keys(x: 1, y: 2, z: 3) == :ok assert update_with_multiple_keys(:x, :y) == {:ok, {"x", "y"}} - assert Cache.get(:x) 
== {"x", "y"} - assert Cache.get(:y) == {"x", "y"} - assert Cache.get(:z) == 3 + assert Cache.get!(:x) == {"x", "y"} + assert Cache.get!(:y) == {"x", "y"} + assert Cache.get!(:z) == 3 :ok = Process.sleep(1100) - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end end @@ -434,269 +439,222 @@ defmodule Nebulex.CachingTest do assert set_keys(x: 1, y: 2, z: 3) == :ok assert evict_fun(:x) == :x - refute Cache.get(:x) - assert Cache.get(:y) == 2 - assert Cache.get(:z) == 3 + refute Cache.get!(:x) + assert Cache.get!(:y) == 2 + assert Cache.get!(:z) == 3 assert evict_fun(:y) == :y - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end test "with multiple keys" do assert set_keys(x: 1, y: 2, z: 3) == :ok + assert evict_keys_fun(:x, :y) == {:x, :y} - refute Cache.get(:x) - refute Cache.get(:y) - assert Cache.get(:z) == 3 + + refute Cache.get!(:x) + refute Cache.get!(:y) + assert Cache.get!(:z) == 3 end test "all entries" do assert set_keys(x: 1, y: 2, z: 3) == :ok + assert evict_all_fun("hello") == "hello" - refute Cache.get(:x) - refute Cache.get(:y) - refute Cache.get(:z) + + refute Cache.get!(:x) + refute Cache.get!(:y) + refute Cache.get!(:z) end test "without args" do - refute Cache.get(0) + refute Cache.get!(0) assert get_without_args() == "hello" - assert Cache.get(0) == "hello" + assert Cache.get!(0) == "hello" assert evict_without_args() == "hello" - refute Cache.get(0) + refute Cache.get!(0) end end - describe "option :key_generator on" do + describe "option :key with custom key generator on" do test "cacheable annotation" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen, [1, 2]) + key = default_hash(:get_with_keygen, 2, [1, 2]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} 
- end - - test "cache_evict annotation" do - key = TestKeyGenerator.generate(__MODULE__, :evict_with_keygen, ["foo", "bar"]) - - :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} - - assert evict_with_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) - end - - test "cache_put annotation" do - assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 - - assert put_with_keygen(2, 4) == 8 - assert multiple_clauses(2, 2) == 8 - assert Cache.get(2) == 8 - - assert put_with_keygen(2, 8) == 16 - assert multiple_clauses(2, 2) == 16 - assert Cache.get(2) == 16 + assert Cache.get!(key) == {1, 2} end test "cacheable annotation with multiple function clauses and pattern-matching " do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen2, [1, 2]) + key = default_hash(:get_with_keygen2, 3, [1, 2]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen2(1, 2, %{a: {1, 2}}) == {1, 2} - assert Cache.get(key) == {1, 2} + assert Cache.get!(key) == {1, 2} - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen2, [1, 2, %{b: 3}]) + key = default_hash(:get_with_keygen2, 3, [1, 2, %{b: 3}]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen2(1, 2, %{b: 3}) == {1, 2, %{b: 3}} - assert Cache.get(key) == {1, 2, %{b: 3}} + assert Cache.get!(key) == {1, 2, %{b: 3}} end test "cacheable annotation with ignored arguments" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_keygen3, [1, %{b: 2}]) + key = default_hash(:get_with_keygen3, 7, [1, %{b: 2}]) - refute Cache.get(key) + refute Cache.get!(key) assert get_with_keygen3(1, 2, 3, {1, 2}, [1], %{a: 1}, %{b: 2}) == {1, %{b: 2}} - assert Cache.get(key) == {1, %{b: 2}} + assert Cache.get!(key) == {1, %{b: 2}} end - end - - describe "default key generator on" do - setup_with_cache(CacheWithDefaultKeyGenerator) - test "cacheable annotation" do - key = CacheWithDefaultKeyGenerator.generate(__MODULE__, :get_with_default_key_generator, [1]) + 
test "cacheable annotation with custom key" do + key = {:a, :b, 1, 2} - refute CacheWithDefaultKeyGenerator.get(key) - assert get_with_default_key_generator(1) == 1 - assert CacheWithDefaultKeyGenerator.get(key) == 1 + refute Cache.get!(key) + assert get_with_keygen4(1, 2) == {1, 2} + assert Cache.get!(key) == {1, 2} end test "cache_evict annotation" do - key = CacheWithDefaultKeyGenerator.generate(__MODULE__, :del_with_default_key_generator, [1]) + key = default_hash(:evict_with_keygen, 2, ["foo", "bar"]) - :ok = CacheWithDefaultKeyGenerator.put(key, 1) - assert CacheWithDefaultKeyGenerator.get(key) == 1 - - assert del_with_default_key_generator(1) == 1 - refute CacheWithDefaultKeyGenerator.get(key) - end - end - - describe "key-generator tuple on" do - test "cacheable annotation" do - key = generate_key({1, 2}) - - refute Cache.get(key) - assert get_with_tuple_keygen(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} - end - - test "cacheable annotation (with key-generator: TestKeyGenerator)" do - key = TestKeyGenerator.generate(:a, :b, [1]) + :ok = Cache.put(key, {"foo", "bar"}) + assert Cache.get!(key) == {"foo", "bar"} - refute Cache.get(key) - assert get_with_tuple_keygen2(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} + assert evict_with_keygen("foo", "bar") == {"foo", "bar"} + refute Cache.get!(key) end - test "cache_evict annotation" do - key = generate_key({"foo", "bar"}) + test "cache_evict annotation with custom key" do + key = {"foo", "bar"} :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} + assert Cache.get!(key) == {"foo", "bar"} - assert evict_with_tuple_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) + assert evict_with_keygen2("foo", "bar") == {"foo", "bar"} + refute Cache.get!(key) end test "cache_put annotation" do assert multiple_clauses(2, 2) == 4 - assert Cache.get(2) == 4 + assert Cache.get!(2) == 4 - assert put_with_tuple_keygen(2, 4) == 8 + assert put_with_keygen(2, 4) == 8 assert 
multiple_clauses(2, 2) == 8 - assert Cache.get(2) == 8 + assert Cache.get!(2) == 8 - assert put_with_tuple_keygen(2, 8) == 16 + assert put_with_keygen(2, 8) == 16 assert multiple_clauses(2, 2) == 16 - assert Cache.get(2) == 16 + assert Cache.get!(2) == 16 end - end - describe "key-generator with shorthand tuple on" do - test "cacheable annotation" do - key = TestKeyGenerator.generate(__MODULE__, :get_with_shorthand_tuple_keygen, [1]) + test "cache_put annotation with custom key" do + key = {:tuple, 2} - refute Cache.get(key) - assert get_with_shorthand_tuple_keygen(1, 2, 3) == {1, 2} - assert Cache.get(key) == {1, 2} - end + assert Cache.put(key, 2) == :ok + assert Cache.get!(key) == 2 - test "cacheable annotation (with key-generator: __MODULE__)" do - key = generate(__MODULE__, :get_with_shorthand_tuple_keygen2, [1]) + assert put_with_keygen2(2, 4) == 8 + assert Cache.get!(key) == 8 - refute Cache.get(key) - assert get_with_shorthand_tuple_keygen2(1, 2) == {1, 2} - assert Cache.get(key) == {1, 2} + assert put_with_keygen2(2, 8) == 16 + assert Cache.get!(key) == 16 end + end - test "cache_evict annotation" do - key = TestKeyGenerator.generate(__MODULE__, :evict_with_shorthand_tuple_keygen, ["foo"]) - - :ok = Cache.put(key, {"foo", "bar"}) - assert Cache.get(key) == {"foo", "bar"} - - assert evict_with_shorthand_tuple_keygen("foo", "bar") == {"foo", "bar"} - refute Cache.get(key) + describe "option :on_error on" do + test "cacheable annotation raises a cache error" do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + get_and_raise_exception(:raise) + end end - test "cache_put annotation" do - key = TestKeyGenerator.generate(__MODULE__, :put_with_shorthand_tuple_keygen, ["foo"]) + test "cacheable annotation ignores the exception" do + assert get_ignoring_exception("foo") == "foo" + end - refute Cache.get(key) - assert put_with_shorthand_tuple_keygen("foo", "bar") == {"foo", "bar"} - assert Cache.get(key) == {"foo", "bar"} + test "cache_put annotation raises 
a cache error" do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + update_and_raise_exception(:raise) + end end - end - describe "option :on_error on" do - test "cacheable annotation" do - assert get_with_exception("foo") == "foo" + test "cache_put annotation ignores the exception" do + assert update_ignoring_exception("foo") == "foo" end - test "cache_put annotation" do - assert update_with_exception("foo") == "foo" + test "cache_evict annotation raises a cache error" do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + evict_and_raise_exception(:raise) + end end - test "cache_evict annotation" do - assert evict_with_exception("foo") == "foo" + test "cache_evict annotation ignores the exception" do + assert evict_ignoring_exception("foo") == "foo" end end - describe "option :cache with MFA" do + describe "option :cache with anonymous function on" do test "cacheable annotation" do - refute Cache.get("foo") - assert get_mfa_cache_without_extra_args("foo") == "foo" - assert Cache.get("foo") == "foo" + refute Cache.get!("foo") + + assert get_fn_cache("foo") == "foo" + assert_receive %{module: __MODULE__, function_name: :get_fn_cache, args: ["foo"]} + assert Cache.get!("foo") == "foo" end test "cache_put annotation" do :ok = Cache.put("foo", "bar") - assert update_mfa_cache_without_extra_args("bar bar") == "bar bar" - assert Cache.get("foo") == "bar bar" + assert update_fn_cache("bar bar") == "bar bar" + assert_receive %{module: __MODULE__, function_name: :update_fn_cache, args: ["bar bar"]} + assert Cache.get!("foo") == "bar bar" end test "cache_evict annotation" do :ok = Cache.put("foo", "bar") - assert delete_mfa_cache_without_extra_args("bar bar") == "bar bar" - refute Cache.get("foo") + assert delete_fn_cache("bar bar") == "bar bar" + assert_receive %{module: __MODULE__, function_name: :delete_fn_cache, args: ["bar bar"]} + refute Cache.get!("foo") end end - describe "option :cache with MFA and extra args" do - test "cacheable annotation" do - 
refute Cache.get("foo") - assert get_mfa_cache_with_extra_args("foo") == "foo" - assert Cache.get("foo") == "foo" - end - - test "cache_put annotation" do - :ok = Cache.put("foo", "bar") - - assert update_mfa_cache_with_extra_args("bar bar") == "bar bar" - assert Cache.get("foo") == "bar bar" - end + describe "option :cache raises an exception" do + test "due to invalid cache value" do + assert_raise ArgumentError, ~r"invalid value for :cache option", fn -> + defmodule RuntimeCacheTest do + use Nebulex.Caching - test "cache_evict annotation" do - :ok = Cache.put("foo", "bar") + @decorate cacheable(cache: 123, key: {a, b}) + def t(a, b), do: {a, b} + end - assert delete_mfa_cache_with_extra_args("bar bar") == "bar bar" - refute Cache.get("foo") + RuntimeCacheTest.t(1, 2) + end end end ## Annotated Functions - @decorate cacheable(cache: Cache) + @cache Cache + + @decorate cacheable(cache: @cache) def get_without_args, do: "hello" - @decorate cacheable(cache: Cache, key: x) - def get_by_x(x, y \\ nil) do + @decorate cacheable(cache: @cache, key: x) + def get_by_xy(x, y \\ nil) do with _ when not is_nil(x) <- x, _ when not is_nil(y) <- y do y end end - @decorate cacheable(cache: Cache, key: {x, y}) - def get_by_xy(x, y) do + @decorate cacheable(key: {x, y}) + def multiply_xy(x, y) do {x, y * 2} end @@ -705,41 +663,55 @@ defmodule Nebulex.CachingTest do x end - @decorate cacheable(cache: Cache) + @decorate cacheable() def get_false_with_side_effect(v) do - Cache.update("side-effect", 1, &(&1 + 1)) + _ = Cache.update!("side-effect", 1, &(&1 + 1)) + v end - @decorate cacheable(cache: Cache, match: fn x -> x != :x end) + @decorate cacheable(match: &(&1 != :x)) def get_with_match(x) do x end - @decorate cacheable(cache: Cache, match: &match_fun/1) + @decorate cacheable(cache: dynamic_cache(Cache, Cache), match: &match_fun/1) def get_with_match_fun(x) do {:ok, to_string(x)} rescue _ -> :error end - @decorate cacheable(cache: Cache) + @decorate cacheable(cache: 
dynamic_cache(Cache, Cache), match: &match_fun_with_ctx/2) + def get_with_match_fun_and_ctx(x) do + {:ok, to_string(x)} + rescue + _ -> :error + end + + @decorate cacheable(key: ttl, match: &match_fun/1) + def get_with_custom_ttl(ttl) do + {:ok, %{ttl: ttl}} + end + + @decorate cacheable() def get_with_default_key(x, y) do _ = {x, y} + :ok end - @decorate cacheable(cache: Cache, key: {Meta, meta.id}) + @decorate cacheable(key: {Meta, meta.id}) def get_meta(%Meta{} = meta) do meta end - @decorate cacheable(cache: Cache, key: map[:id]) + @decorate cacheable(key: map[:id]) def get_map(map) do map end - @decorate cache_put(cache: Cache) + @decorate cache_put() def update_without_args, do: "hello" @decorate cache_put(cache: Cache, key: x) @@ -750,26 +722,26 @@ defmodule Nebulex.CachingTest do end end - @decorate cache_put(cache: Cache, key: x, opts: [ttl: 1000]) + @decorate cache_put(cache: dynamic_cache(Cache, Cache), keys: [x], opts: [ttl: 1000]) def update_with_opts(x) do x end - @decorate cache_put(cache: Cache, key: x, match: &match_fun/1) + @decorate cache_put(cache: dynamic_cache(Cache, Cache), key: x, match: &match_fun/1) def update_with_match(x) do {:ok, to_string(x)} rescue _ -> :error end - @decorate cache_put(cache: Cache, keys: [x, y], match: &match_fun/1, opts: [ttl: 1000]) + @decorate cache_put(keys: [x, y], match: &match_fun/1, opts: [ttl: 1000]) def update_with_multiple_keys(x, y) do {:ok, {to_string(x), to_string(y)}} rescue _ -> :error end - @decorate cache_evict(cache: Cache) + @decorate cache_evict(cache: dynamic_cache(Cache, Cache)) def evict_without_args, do: "hello" @decorate cache_evict(cache: Cache, key: x) @@ -777,17 +749,17 @@ defmodule Nebulex.CachingTest do x end - @decorate cache_evict(cache: Cache, keys: [x, y]) + @decorate cache_evict(keys: [x, y]) def evict_keys_fun(x, y) do {x, y} end - @decorate cache_evict(cache: Cache, all_entries: true, before_invocation: true) + @decorate cache_evict(all_entries: true, before_invocation: true) def 
evict_all_fun(x) do x end - @decorate cacheable(cache: Cache, key: x) + @decorate cacheable(key: x) def multiple_clauses(x, y \\ 0) def multiple_clauses(x, y) when is_integer(x) and is_integer(y) do @@ -798,15 +770,17 @@ defmodule Nebulex.CachingTest do {x, y} end - @decorate cacheable(cache: Cache, key_generator: TestKeyGenerator) + ## Custom key generation + + @decorate cacheable(key: &:erlang.phash2/1) def get_with_keygen(x, y) do {x, y} end - @decorate cacheable(cache: Cache, key_generator: TestKeyGenerator) - def get_with_keygen2(x, y, z) + @decorate cacheable(key: &:erlang.phash2/1) + def get_with_keygen2(x, y, z \\ %{}) - def get_with_keygen2(x, y, %{a: {_x, _y}}) do + def get_with_keygen2(x, y, %{a: {_x1, _y1}}) do {x, y} end @@ -814,145 +788,121 @@ defmodule Nebulex.CachingTest do {x, y, z} end - @decorate cacheable(cache: Cache, key_generator: TestKeyGenerator) + @decorate cacheable(key: &:erlang.phash2/1) def get_with_keygen3(x, _y, _, {_, _}, [_], %{}, %{} = z) do {x, z} end - @decorate cache_evict(cache: Cache, key_generator: TestKeyGenerator) - def evict_with_keygen(x, y) do + @decorate cacheable(key: &:erlang.list_to_tuple([:a, :b | &1.args])) + def get_with_keygen4(x, y) do {x, y} end - @decorate cache_put(cache: Cache, key_generator: TestKeyGenerator) - def put_with_keygen(x, y) do - x * y - end - - @decorate cacheable(cache: CacheWithDefaultKeyGenerator) - def get_with_default_key_generator(id), do: id - - @decorate cache_evict(cache: CacheWithDefaultKeyGenerator) - def del_with_default_key_generator(id), do: id - - @decorate cacheable(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def get_with_shorthand_tuple_keygen(x, y, _z) do + @decorate cache_evict(key: &:erlang.phash2/1) + def evict_with_keygen(x, y) do {x, y} end - @decorate cacheable(cache: Cache, key_generator: {__MODULE__, [x]}) - def get_with_shorthand_tuple_keygen2(x, y) do + @decorate cache_evict(key: &:erlang.list_to_tuple(&1.args)) + def evict_with_keygen2(x, y) do {x, y} 
end - @decorate cache_evict(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def evict_with_shorthand_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(key: &hd(&1.args)) + def put_with_keygen(x, y) do + x * y end - @decorate cache_put(cache: Cache, key_generator: {TestKeyGenerator, [x]}) - def put_with_shorthand_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(key: &{:tuple, hd(&1.args)}) + def put_with_keygen2(x, y) do + x * y end - @decorate cacheable(cache: Cache, key_generator: {__MODULE__, :generate_key, [{x, y}]}) - def get_with_tuple_keygen(x, y) do - {x, y} - end + ## on_error - @decorate cacheable(cache: Cache, key_generator: {TestKeyGenerator, :generate, [:a, :b, [x]]}) - def get_with_tuple_keygen2(x, y) do - {x, y} + @decorate cacheable(cache: YetAnotherCache, key: x, on_error: :raise) + def get_and_raise_exception(x) do + x end - @decorate cache_evict(cache: Cache, key_generator: {__MODULE__, :generate_key, [{x, y}]}) - def evict_with_tuple_keygen(x, y) do - {x, y} + @decorate cache_put(cache: YetAnotherCache, key: x, on_error: :raise) + def update_and_raise_exception(x) do + x end - @decorate cache_put(cache: Cache, key_generator: {__MODULE__, :generate_key, [x]}) - def put_with_tuple_keygen(x, y) do - x * y + @decorate cache_evict(cache: YetAnotherCache, key: x, on_error: :raise) + def evict_and_raise_exception(x) do + x end - @decorate cacheable(cache: YetAnotherCache, key: x, on_error: :nothing) - def get_with_exception(x) do + @decorate cacheable(cache: YetAnotherCache, key: x) + def get_ignoring_exception(x) do x end - @decorate cache_put(cache: YetAnotherCache, key: x, on_error: :nothing) - def update_with_exception(x) do + @decorate cache_put(cache: YetAnotherCache, key: x) + def update_ignoring_exception(x) do x end - @decorate cache_evict(cache: YetAnotherCache, key: x, on_error: :nothing) - def evict_with_exception(x) do + @decorate cache_evict(cache: YetAnotherCache, key: x) + def evict_ignoring_exception(x) do x end - 
@decorate cacheable(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: var) - def get_mfa_cache_with_extra_args(var) do - var - end + ## Runtime target cache - @decorate cacheable(cache: {__MODULE__, :cache_without_extra_args, []}, key: var) - def get_mfa_cache_without_extra_args(var) do + @decorate cacheable(cache: &target_cache/1, key: var) + def get_fn_cache(var) do var end - @decorate cache_put(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: "foo") - def update_mfa_cache_with_extra_args(var) do + @decorate cache_put(cache: &target_cache/1, key: "foo") + def update_fn_cache(var) do var end - @decorate cache_put(cache: {__MODULE__, :cache_without_extra_args, []}, key: "foo") - def update_mfa_cache_without_extra_args(var) do + @decorate cache_evict(cache: &target_cache/1, key: "foo") + def delete_fn_cache(var) do var end - @decorate cache_evict(cache: {__MODULE__, :cache_with_extra_args, ["extra_arg"]}, key: "foo") - def delete_mfa_cache_with_extra_args(var) do - var + ## Key references + + @decorate cacheable(key: name, references: & &1.id) + def get_with_keyref(name) do + %{id: "referenced_id", name: name} end - @decorate cache_evict(cache: {__MODULE__, :cache_without_extra_args, []}, key: "foo") - def delete_mfa_cache_without_extra_args(var) do - var + @decorate cacheable(key: name, references: &:erlang.phash2({&1.id, &2.args})) + def get_with_keyref_fn_ctx(name) do + %{id: "referenced_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: & &1.id) - def get_with_referenced_key(name) do + @decorate cacheable(key: name, references: &keyref(YetAnotherCache, &1.id)) + def get_with_keyref_cache(name) do %{id: "referenced_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: attrs.id) - def get_with_referenced_key_from_args(name, attrs) do + @decorate cacheable(cache: dynamic_cache(Cache, Cache), key: name, references: attrs.id) + def get_with_keyref_from_args(name, attrs) do %{attrs: 
attrs, name: name} end - @decorate cacheable(cache: Cache, key: name, references: "fixed_id") - def get_with_fixed_referenced_key(name) do + @decorate cacheable(key: name, references: "fixed_id") + def get_with_fixed_keyref(name) do %{id: "fixed_id", name: name} end - @decorate cacheable(cache: Cache, key: name, references: &keyref(YetAnotherCache, &1.id)) - def get_with_ref_key_with_cache(name) do - %{id: "referenced_id", name: name} - end - - @decorate cacheable(cache: Cache, key: ttl, match: &match_fun/1) - def get_with_custom_ttl(ttl) do - {:ok, %{ttl: ttl}} - end - ## Helpers # Custom key-generator function def generate_key(arg), do: arg - @impl Nebulex.Caching.KeyGenerator - def generate(module, function_name, args) do - :erlang.phash2({module, function_name, args}) + def target_cache(arg) do + _ = send(self(), arg) + + Cache end def cache_with_extra_args(_mod, _fun, _args, _extra_arg), do: Cache @@ -966,11 +916,27 @@ defmodule Nebulex.CachingTest do defp match_fun({:ok, val}), do: {true, val} defp match_fun(_), do: false + defp match_fun_with_ctx(result, ctx) do + _ = send(self(), ctx) + + match_fun(result) + end + defp set_keys(entries) do assert :ok == Cache.put_all(entries) Enum.each(entries, fn {k, v} -> - assert v == Cache.get(k) + assert v == Cache.get!(k) end) end + + defp default_hash(fun, arity, args) do + %Nebulex.Caching.Decorators.Context{ + module: __MODULE__, + function_name: fun, + arity: arity, + args: args + } + |> :erlang.phash2() + end end diff --git a/test/nebulex/hook_test.exs b/test/nebulex/hook_test.exs deleted file mode 100644 index 64081d3e..00000000 --- a/test/nebulex/hook_test.exs +++ /dev/null @@ -1,159 +0,0 @@ -defmodule Nebulex.HookTest do - use ExUnit.Case, async: true - - alias Nebulex.Hook - - describe "before" do - defmodule BeforeHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all before(&Nebulex.HookTest.hook_fun/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test 
"hook" do - {:ok, _pid} = BeforeHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = BeforeHookCache.new_generation() - - refute BeforeHookCache.get("foo") - assert_receive %Hook{} = hook, 200 - assert hook.step == :before - assert hook.module == BeforeHookCache - assert hook.name == :get - assert hook.arity == 2 - refute hook.return - - assert :ok == BeforeHookCache.put("foo", "bar") - assert_receive %Hook{} = hook, 200 - assert hook.step == :before - assert hook.module == BeforeHookCache - assert hook.name == :put - assert hook.arity == 3 - refute hook.return - - :ok = BeforeHookCache.stop() - end - end - - describe "after_return" do - defmodule AfterReturnHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all after_return(&Nebulex.HookTest.hook_fun/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test "hook" do - {:ok, _pid} = AfterReturnHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = AfterReturnHookCache.new_generation() - - refute AfterReturnHookCache.get("foo") - assert_receive %Hook{} = hook, 200 - assert hook.module == AfterReturnHookCache - assert hook.name == :get - assert hook.arity == 2 - assert hook.step == :after_return - refute hook.return - - assert :ok == AfterReturnHookCache.put("foo", "bar") - assert_receive %Hook{} = hook, 200 - assert hook.module == AfterReturnHookCache - assert hook.name == :put - assert hook.arity == 3 - assert hook.step == :after_return - assert hook.return == :ok - - :ok = AfterReturnHookCache.stop() - end - end - - describe "around" do - defmodule AroundHookCache do - @moduledoc false - use Nebulex.Hook - @decorate_all around(&Nebulex.TestCache.TestHook.track/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - - alias Nebulex.TestCache.TestHook - - def init(opts) do - {:ok, pid} = TestHook.start_link() - {:ok, Keyword.put(opts, :hook_pid, pid)} - end - end - - test "hook" do - {:ok, 
_pid} = AroundHookCache.start_link() - true = Process.register(self(), :hooked_cache) - _ = AroundHookCache.new_generation() - - refute AroundHookCache.get("foo") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - refute hook.return - assert hook.acc >= 0 - - assert :ok == AroundHookCache.put("foo", "bar") - assert_receive %Hook{module: AroundHookCache, name: :put, arity: 3} = hook, 200 - assert hook.acc >= 0 - assert hook.return == :ok - - assert :ok == AroundHookCache.put("hello", "world") - assert_receive %Hook{module: AroundHookCache, name: :put, arity: 3} = hook, 200 - assert hook.acc >= 0 - assert hook.return == :ok - - assert "bar" == AroundHookCache.get("foo") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - assert hook.return == "bar" - assert hook.acc >= 0 - - assert "world" == AroundHookCache.get("hello") - assert_receive %Hook{module: AroundHookCache, name: :get, arity: 2} = hook, 200 - assert hook.return == "world" - assert hook.acc >= 0 - - :ok = AroundHookCache.stop() - end - end - - describe "exception" do - defmodule ErrorCache do - @moduledoc false - use Nebulex.Hook - @decorate_all around(&Nebulex.TestCache.TestHook.hook_error/1) - - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - test "hook" do - {:ok, _pid} = ErrorCache.start_link() - - assert_raise RuntimeError, ~r"hook execution failed on step :before with error", fn -> - ErrorCache.get("foo") - end - - :ok = ErrorCache.stop() - end - end - - ## Helpers - - def hook_fun(%Hook{name: name} = hook) when name in [:get, :put] do - send(self(), hook) - end - - def hook_fun(hook), do: hook -end diff --git a/test/nebulex/telemetry_test.exs b/test/nebulex/telemetry_test.exs index bd4c4fb5..92f97646 100644 --- a/test/nebulex/telemetry_test.exs +++ b/test/nebulex/telemetry_test.exs @@ -10,126 +10,61 @@ defmodule Nebulex.TelemetryTest do defmodule Cache do use Nebulex.Cache, otp_app: :nebulex, - 
adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end + adapter: Nebulex.TestAdapter end ## Shared constants @prefix [:nebulex, :telemetry_test, :cache] - @start @prefix ++ [:command, :start] @stop @prefix ++ [:command, :stop] - - @start_events [ - @prefix ++ [:command, :start], - @prefix ++ [:l1, :command, :start], - @prefix ++ [:l2, :command, :start], - @prefix ++ [:l2, :primary, :command, :start], - @prefix ++ [:l3, :command, :start], - @prefix ++ [:l3, :primary, :command, :start] - ] - - @stop_events [ - @prefix ++ [:command, :stop], - @prefix ++ [:l1, :command, :stop], - @prefix ++ [:l2, :command, :stop], - @prefix ++ [:l2, :primary, :command, :stop], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :stop] - ] - - @exception_events [ - @prefix ++ [:command, :exception], - @prefix ++ [:l1, :command, :exception], - @prefix ++ [:l2, :command, :exception], - @prefix ++ [:l2, :primary, :command, :exception], - @prefix ++ [:l3, :command, :stop], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - @caches [Cache, Cache.L1, Cache.L2, Cache.L2.Primary, Cache.L3, Cache.L3.Primary] - - @events Enum.zip([@caches, @start_events, @stop_events]) - - @config [ - model: :inclusive, - levels: [ - {Cache.L1, gc_interval: :timer.hours(1)}, - {Cache.L2, primary: [gc_interval: :timer.hours(1)]}, - {Cache.L3, primary: [gc_interval: :timer.hours(1)]} - ] - ] + @exception @prefix ++ [:command, :exception] + @test_adapter_start [:nebulex, :test_adapter, :start] + @events [@start, @stop, @exception, @test_adapter_start] ## Tests describe "span/3" do - setup_with_cache(Cache, @config) + setup_with_cache Cache test "ok: emits start and stop events" do - 
with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> assert Cache.put("foo", "bar") == :ok - for {cache, start, stop} <- @events do - assert_receive {^start, measurements, %{function_name: :put} = metadata} - assert measurements[:system_time] |> DateTime.from_unix!(:native) - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:telemetry_span_context] |> is_reference() - - assert_receive {^stop, measurements, %{function_name: :put} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", "bar", :infinity, :put, []] - assert metadata[:result] == true - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@start, measurements, %{command: :put} = metadata} + assert measurements[:system_time] |> DateTime.from_unix!(:native) + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} + + assert_receive {@stop, measurements, %{command: :put} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, []] + assert metadata[:result] == {:ok, true} + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} end) end test "raise: emits start and exception events" do - with_telemetry_handler(__MODULE__, @exception_events, fn -> - Adapter.with_meta(Cache.L3.Primary, fn _, meta -> - true = :ets.delete(meta.meta_tab) - end) + with_telemetry_handler(__MODULE__, @events, fn -> + key = {:eval, fn -> raise ArgumentError, "error" end} assert_raise ArgumentError, fn -> - Cache.get("foo") + Cache.fetch(key) end - ex_events = [ - @prefix ++ 
[:command, :exception], - @prefix ++ [:l3, :command, :exception], - @prefix ++ [:l3, :primary, :command, :exception] - ] - - for {cache, exception} <- ex_events do - assert_receive {^exception, measurements, %{function_name: :get} = metadata} - assert measurements[:duration] > 0 - assert metadata[:adapter_meta][:cache] == cache - assert metadata[:args] == ["foo", []] - assert metadata[:kind] == :error - assert metadata[:reason] == :badarg - assert metadata[:stacktrace] - assert metadata[:telemetry_span_context] |> is_reference() - end + assert_receive {@exception, measurements, %{command: :fetch} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == [key, []] + assert metadata[:kind] == :error + assert metadata[:reason] == %ArgumentError{message: "error"} + assert metadata[:stacktrace] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{} end) end @@ -153,23 +88,20 @@ defmodule Nebulex.TelemetryTest do end describe "span/3 bypassed" do - setup_with_cache(Cache, Keyword.put(@config, :telemetry, false)) + setup_with_cache Cache, telemetry: false test "telemetry set to false" do - for cache <- @caches do - Adapter.with_meta(cache, fn _, meta -> - assert meta.telemetry == false - end) - end + Adapter.with_meta(Cache, fn meta -> + assert meta.telemetry == false + end) end test "ok: does not emit start and stop events" do - with_telemetry_handler(__MODULE__, @start_events ++ @stop_events, fn -> + with_telemetry_handler(__MODULE__, @events, fn -> commands = [ put: ["foo", "bar"], put_all: [%{"foo foo" => "bar bar"}], get: ["foo"], - get_all: [["foo", "foo foo"]], delete: ["unknown"], take: ["foo foo"], has_key?: ["foo foo"], @@ -177,35 +109,98 @@ defmodule Nebulex.TelemetryTest do ttl: ["foo"], expire: ["foo", 60_000], touch: ["foo"], - all: [], + get_all: [{:in, ["foo", "foo foo"]}], + get_all: [], stream: [], transaction: [fn -> :ok end], 
in_transaction?: [], dump: ["/invalid/path"], load: ["wrong_file"], - stats: [] + info: [] ] for {command, args} <- commands do - _ = apply(Cache.L1, command, args) - _ = apply(Cache.L2, command, args) - _ = apply(Cache.L3, command, args) - - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + _ = apply(Cache, command, args) + + refute_received {@start, _, %{command: :command}} + refute_received {@stop, _, %{command: :command}} end for {command, args} <- Keyword.drop(commands, [:dump, :load]) do _ = apply(Cache, command, args) - for {_cache, start, stop} <- @events do - refute_received {^start, _, %{function_name: :command}} - refute_received {^stop, _, %{function_name: :command}} - end + refute_received {@start, _, %{command: :command}} + refute_received {@stop, _, %{command: :command}} end end) end end + + describe "span/3 with custom event and metadata" do + @custom_prefix [:my, :custom, :event] + @custom_start @custom_prefix ++ [:start] + @custom_stop @custom_prefix ++ [:stop] + @custom_exception @custom_prefix ++ [:exception] + @custom_events [@custom_start, @custom_stop, @custom_exception] + + @custom_opts [ + telemetry_event: @custom_prefix, + telemetry_metadata: %{foo: "bar"} + ] + + setup_with_cache Cache + + test "ok: emits start and stop events" do + with_telemetry_handler(__MODULE__, @custom_events, fn -> + :ok = Cache.put("foo", "bar", @custom_opts) + + assert_receive {@custom_start, measurements, %{command: :put} = metadata} + assert measurements[:system_time] |> DateTime.from_unix!(:native) + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, @custom_opts] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} + + assert_receive {@custom_stop, measurements, %{command: :put} = metadata} + assert measurements[:duration] > 0 + 
assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == ["foo", "bar", :infinity, :put, @custom_opts] + assert metadata[:result] == {:ok, true} + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} + end) + end + + test "raise: emits start and exception events" do + with_telemetry_handler(__MODULE__, @custom_events, fn -> + key = {:eval, fn -> raise ArgumentError, "error" end} + + assert_raise ArgumentError, fn -> + Cache.fetch(key, @custom_opts) + end + + assert_receive {@custom_exception, measurements, %{command: :fetch} = metadata} + assert measurements[:duration] > 0 + assert metadata[:adapter_meta][:cache] == Cache + assert metadata[:args] == [key, @custom_opts] + assert metadata[:kind] == :error + assert metadata[:reason] == %ArgumentError{message: "error"} + assert metadata[:stacktrace] + assert metadata[:telemetry_span_context] |> is_reference() + assert metadata[:extra_metadata] == %{foo: "bar"} + end) + end + + test "error: invalid telemetry_event" do + assert_raise ArgumentError, ~r"invalid value for :telemetry_event option", fn -> + Cache.fetch(:invalid, telemetry_event: :invalid) + end + end + + test "error: invalid telemetry_metadata" do + assert_raise ArgumentError, ~r"invalid value for :telemetry_metadata option", fn -> + Cache.fetch(:invalid, telemetry_metadata: :invalid) + end + end + end end diff --git a/test/nebulex/utils_test.exs b/test/nebulex/utils_test.exs new file mode 100644 index 00000000..d2acfd6e --- /dev/null +++ b/test/nebulex/utils_test.exs @@ -0,0 +1,4 @@ +defmodule Nebulex.UtilsTest do + use ExUnit.Case, async: true + doctest Nebulex.Utils +end diff --git a/test/shared/cache/deprecated_test.exs b/test/shared/cache/deprecated_test.exs deleted file mode 100644 index d8890c66..00000000 --- a/test/shared/cache/deprecated_test.exs +++ /dev/null @@ -1,31 +0,0 @@ -defmodule Nebulex.Cache.DeprecatedTest do - import Nebulex.CacheCase - - deftests do - describe 
"size/0" do - test "returns the current number of entries in cache", %{cache: cache} do - for x <- 1..100, do: cache.put(x, x) - assert cache.size() == 100 - - for x <- 1..50, do: cache.delete(x) - assert cache.size() == 50 - - for x <- 51..60, do: assert(cache.get(x) == x) - assert cache.size() == 50 - end - end - - describe "flush/0" do - test "evicts all entries from cache", %{cache: cache} do - Enum.each(1..2, fn _ -> - for x <- 1..100, do: cache.put(x, x) - - assert cache.flush() == 100 - :ok = Process.sleep(500) - - for x <- 1..100, do: refute(cache.get(x)) - end) - end - end - end -end diff --git a/test/shared/cache/entry_expiration_test.exs b/test/shared/cache/entry_expiration_test.exs deleted file mode 100644 index 90f34e15..00000000 --- a/test/shared/cache/entry_expiration_test.exs +++ /dev/null @@ -1,226 +0,0 @@ -defmodule Nebulex.Cache.EntryExpirationTest do - import Nebulex.CacheCase - - deftests do - describe "ttl option is given to" do - test "put", %{cache: cache} do - assert cache.put("foo", "bar", ttl: 500) == :ok - assert cache.has_key?("foo") - - Process.sleep(600) - refute cache.has_key?("foo") - end - - test "put_all", %{cache: cache} do - entries = [{0, nil} | for(x <- 1..3, do: {x, x})] - assert cache.put_all(entries, ttl: 1000) - - refute cache.get(0) - for x <- 1..3, do: assert(x == cache.get(x)) - :ok = Process.sleep(1200) - for x <- 1..3, do: refute(cache.get(x)) - end - - test "put_new_all", %{cache: cache} do - assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: 1000) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - - refute cache.put_new_all(%{"apples" => 3, "oranges" => 1}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - refute cache.get("oranges") - - :ok = Process.sleep(1200) - refute cache.get("apples") - refute cache.get("bananas") - end - - test "take", %{cache: cache} do - :ok = cache.put("foo", "bar", ttl: 500) - :ok = Process.sleep(600) - - refute cache.take(1) - end - 
- test "take!", %{cache: cache} do - :ok = cache.put(1, 1, ttl: 100) - :ok = Process.sleep(500) - - assert_raise KeyError, fn -> - cache.take!(1) - end - end - - test "incr (initializes default value if ttl is expired)", %{cache: cache} do - assert cache.incr(:counter, 1, ttl: 200) == 1 - assert cache.incr(:counter) == 2 - - :ok = Process.sleep(210) - - assert cache.incr(:counter, 1, ttl: 200) == 1 - assert cache.incr(:counter) == 2 - end - end - - describe "ttl" do - test "returns the remaining ttl for the given key", %{cache: cache} do - assert cache.put(:a, 1, ttl: 500) == :ok - assert cache.ttl(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(10) - assert cache.ttl(:a) > 0 - assert cache.ttl(:b) == :infinity - - :ok = Process.sleep(600) - refute cache.ttl(:a) - assert cache.ttl(:b) == :infinity - end - - test "returns nil if key does not exist", %{cache: cache} do - refute cache.ttl(:non_existent) - end - end - - describe "expire" do - test "alters the expiration time for the given key", %{cache: cache} do - assert cache.put(:a, 1, ttl: 500) == :ok - assert cache.ttl(:a) > 0 - - assert cache.expire(:a, 1000) - assert cache.ttl(:a) > 100 - - assert cache.expire(:a, :infinity) - assert cache.ttl(:a) == :infinity - - refute cache.expire(:b, 5) - end - - test "returns false if key does not exist", %{cache: cache} do - assert cache.expire(:non_existent, 1000) == false - end - - test "raises when ttl is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> - cache.expire(:a, "hello") - end - end - end - - describe "touch" do - test "updates the last access time for the given entry", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000) == :ok - - :ok = Process.sleep(100) - assert cache.touch(:touch) - - :ok = Process.sleep(200) - assert cache.touch(:touch) - assert cache.get(:touch) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:touch) - end - - test "returns false if key does not 
exist", %{cache: cache} do - assert cache.touch(:non_existent) == false - end - end - - describe "expiration" do - test "single entry put with ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - for _ <- 3..1 do - assert cache.ttl(1) > 0 - Process.sleep(200) - end - - :ok = Process.sleep(500) - refute cache.ttl(1) - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - end - - test "multiple entries put with ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - :ok = Process.sleep(10) - assert cache.get(1) == 11 - :ok = Process.sleep(1100) - refute cache.get(1) - - ops = [ - put: ["foo", "bar", [ttl: 1000]], - put_all: [[{"foo", "bar"}], [ttl: 1000]] - ] - - for {action, args} <- ops do - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - end - end - end - - describe "get_and_update with ttl" do - test "existing entry", %{cache: cache} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - - :ok = Process.sleep(10) - - assert cache.get_and_update(1, &cache.get_and_update_fun/1) == {1, 2} - assert cache.ttl(1) == :infinity - - :ok = Process.sleep(1200) - assert cache.get(1) == 2 - end - end - - describe "update with ttl" do - test "existing entry", %{cache: cache} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - - :ok = Process.sleep(10) - - assert cache.update(1, 10, &Integer.to_string/1) == "1" - assert cache.ttl(1) == :infinity - - :ok = Process.sleep(1200) - assert cache.get(1) == "1" - end - end - - describe "incr with ttl" do - test "increments a counter", %{cache: cache} do - assert cache.incr(:counter, 1, ttl: 1000) == 1 - assert 
cache.ttl(1) > 0 - - :ok = Process.sleep(1200) - refute cache.get(:counter) - end - - test "increments a counter and then set ttl", %{cache: cache} do - assert cache.incr(:counter, 1) == 1 - assert cache.ttl(:counter) == :infinity - - assert cache.expire(:counter, 500) - :ok = Process.sleep(600) - refute cache.get(:counter) - end - end - end -end diff --git a/test/shared/cache/entry_prop_test.exs b/test/shared/cache/entry_prop_test.exs deleted file mode 100644 index ccdc1f2b..00000000 --- a/test/shared/cache/entry_prop_test.exs +++ /dev/null @@ -1,32 +0,0 @@ -defmodule Nebulex.Cache.EntryPropTest do - import Nebulex.CacheCase - - deftests do - use ExUnitProperties - - describe "key/value entries" do - property "any term", %{cache: cache} do - check all term <- term() do - refute cache.get(term) - - refute cache.replace(term, term) - assert cache.put(term, term) == :ok - refute cache.put_new(term, term) - assert cache.get(term) == term - - assert cache.replace(term, "replaced") - assert cache.get(term) == "replaced" - - assert cache.take(term) == "replaced" - refute cache.take(term) - - assert cache.put_new(term, term) - assert cache.get(term) == term - - assert cache.delete(term) == :ok - refute cache.get(term) - end - end - end - end -end diff --git a/test/shared/cache/entry_test.exs b/test/shared/cache/entry_test.exs deleted file mode 100644 index f1e045c2..00000000 --- a/test/shared/cache/entry_test.exs +++ /dev/null @@ -1,385 +0,0 @@ -defmodule Nebulex.Cache.EntryTest do - import Nebulex.CacheCase - - deftests do - describe "put/3" do - test "puts the given entry into the cache", %{cache: cache} do - for x <- 1..4, do: assert(cache.put(x, x) == :ok) - - assert cache.get(1) == 1 - assert cache.get(2) == 2 - - for x <- 3..4, do: assert(cache.put(x, x * x) == :ok) - assert cache.get(3) == 9 - assert cache.get(4) == 16 - end - - test "nil value has not any effect", %{cache: cache} do - assert cache.put("foo", nil) == :ok - refute cache.get("foo") - end - - test 
"raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put("hello", "world", ttl: "1") - end - end - end - - describe "put_new/3" do - test "puts the given entry into the cache if the key does not exist", %{cache: cache} do - assert cache.put_new("foo", "bar") - assert cache.get("foo") == "bar" - end - - test "do nothing if key does exist already", %{cache: cache} do - :ok = cache.put("foo", "bar") - - refute cache.put_new("foo", "bar bar") - assert cache.get("foo") == "bar" - end - - test "nil value has not any effect", %{cache: cache} do - assert cache.put_new(:mykey, nil) - refute cache.get(:mykey) - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_new("hello", "world", ttl: "1") - end - end - end - - describe "put_new!/3" do - test "puts the given entry into the cache if the key does not exist", %{cache: cache} do - assert cache.put_new!("hello", "world") - assert cache.get("hello") == "world" - end - - test "raises when the key does exist in cache", %{cache: cache} do - :ok = cache.put("hello", "world") - - message = ~r"key \"hello\" already exists in cache" - - assert_raise Nebulex.KeyAlreadyExistsError, message, fn -> - cache.put_new!("hello", "world world") - end - end - end - - describe "replace/3" do - test "replaces the cached entry with a new value", %{cache: cache} do - refute cache.replace("foo", "bar") - - assert cache.put("foo", "bar") == :ok - assert cache.get("foo") == "bar" - - assert cache.replace("foo", "bar bar") - assert cache.get("foo") == "bar bar" - end - - test "nil value has not any effect", %{cache: cache} do - :ok = cache.put("hello", "world") - - assert cache.replace("hello", nil) - assert cache.get("hello") == "world" - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to 
be a valid timeout", fn -> - cache.replace("hello", "world", ttl: "1") - end - end - end - - describe "replace!/3" do - test "replaces the cached entry with a new value", %{cache: cache} do - :ok = cache.put("foo", "bar") - - assert cache.replace!("foo", "bar bar") - assert cache.get("foo") == "bar bar" - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.replace!("foo", "bar") - end - end - end - - describe "put_all/2" do - test "puts the given entries at once", %{cache: cache} do - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) - assert cache.put_all(blueberries: 2, strawberries: 5) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - assert cache.get(:blueberries) == 2 - assert cache.get(:strawberries) == 5 - end - - test "empty list or map has not any effect", %{cache: cache} do - assert cache.put_all([]) - assert cache.put_all(%{}) - assert count = cache.count_all() - assert cache.delete_all() == count - end - - test "puts the given entries using different data types at once", %{cache: cache} do - entries = - Enum.reduce(1..100, %{}, fn elem, acc -> - sample = %{ - elem => elem, - :"atom#{elem}" => elem, - "#{elem}" => elem, - {:tuple, elem} => elem, - <<100, elem>> => elem, - [elem] => elem - } - - Map.merge(acc, sample) - end) - - assert cache.put_all(entries) == :ok - for {k, v} <- entries, do: assert(cache.get(k) == v) - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") - end - end - end - - describe "put_new_all/2" do - test "puts the given entries only if none of the keys does exist already", %{cache: cache} do - assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - - refute cache.put_new_all(%{"apples" => 3, "oranges" => 1}) - 
assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - refute cache.get("oranges") - end - - test "raises when invalid option is given", %{cache: cache} do - assert_raise ArgumentError, ~r"expected ttl: to be a valid timeout", fn -> - cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") - end - end - end - - describe "get/2" do - test "retrieves a cached entry", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.get(x) == x - end - end - - test "returns nil if key does not exist in cache", %{cache: cache} do - refute cache.get("non-existent") - end - end - - describe "get!/2" do - test "retrieves a cached entry", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.get!(x) == x - end - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.get!("non-existent") - end - end - end - - describe "get_all/2" do - test "returns a map with the given keys", %{cache: cache} do - assert cache.put_all(a: 1, c: 3) - assert cache.get_all([:a, :b, :c]) == %{a: 1, c: 3} - assert cache.delete_all() == 2 - end - - test "returns an empty map when none of the given keys is in cache", %{cache: cache} do - assert map_size(cache.get_all(["foo", "bar", 1, :a])) == 0 - end - - test "returns an empty map when the given key list is empty", %{cache: cache} do - assert map_size(cache.get_all([])) == 0 - end - end - - describe "delete/2" do - test "deletes the given key", %{cache: cache} do - for x <- 1..3, do: cache.put(x, x * 2) - - assert cache.get(1) == 2 - assert cache.delete(1) == :ok - refute cache.get(1) - - assert cache.get(2) == 4 - assert cache.get(3) == 6 - - assert cache.delete(:non_existent) == :ok - refute cache.get(:non_existent) - end - end - - describe "take/2" do - test "returns the given key and removes it from cache", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.take(x) == x - refute cache.take(x) - end - end - - 
test "returns nil if the key does not exist in cache", %{cache: cache} do - refute cache.take(:non_existent) - refute cache.take(nil) - end - end - - describe "take!/2" do - test "returns the given key and removes it from cache", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.take!(1) == 1 - end - - test "raises when the key does not exist in cache", %{cache: cache} do - assert_raise KeyError, fn -> - cache.take!(:non_existent) - end - - assert_raise KeyError, fn -> - cache.take!(nil) - end - end - end - - describe "has_key?/1" do - test "returns true if key does exist in cache", %{cache: cache} do - for x <- 1..5 do - :ok = cache.put(x, x) - assert cache.has_key?(x) - end - end - - test "returns false if key does not exist in cache", %{cache: cache} do - refute cache.has_key?(:non_existent) - refute cache.has_key?(nil) - end - end - - describe "update/4" do - test "updates an entry under a key applying a function on the value", %{cache: cache} do - :ok = cache.put("foo", "123") - :ok = cache.put("bar", "foo") - - assert cache.update("foo", 1, &String.to_integer/1) == 123 - assert cache.update("bar", "init", &String.to_atom/1) == :foo - end - - test "creates the entry with the default value if key does not exist", %{cache: cache} do - assert cache.update("foo", "123", &Integer.to_string/1) == "123" - end - - test "has not any effect if the given value is nil", %{cache: cache} do - refute cache.update("bar", nil, &Integer.to_string/1) - refute cache.get("bar") - end - end - - describe "incr/3" do - test "increments a counter by the given amount", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - assert cache.incr(:counter, 2) == 4 - assert cache.incr(:counter, 3) == 7 - assert cache.incr(:counter, 0) == 7 - - assert :counter |> cache.get() |> to_int() == 7 - - assert cache.incr(:counter, -1) == 6 - assert cache.incr(:counter, -1) == 5 - assert cache.incr(:counter, -2) == 3 - assert cache.incr(:counter, -3) 
== 0 - end - - test "increments a counter by the given amount with default", %{cache: cache} do - assert cache.incr(:counter1, 1, default: 10) == 11 - assert cache.incr(:counter2, 2, default: 10) == 12 - assert cache.incr(:counter3, -2, default: 10) == 8 - end - - test "increments a counter by the given amount ignoring the default", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 1, default: 10) == 2 - assert cache.incr(:counter, -1, default: 100) == 1 - end - - test "raises when amount is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected amount to be an integer", fn -> - cache.incr(:counter, "foo") - end - end - - test "raises when default is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected default: to be an integer", fn -> - cache.incr(:counter, 1, default: :invalid) - end - end - end - - describe "decr/3" do - test "decrements a counter by the given amount", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter) == -2 - assert cache.decr(:counter, 2) == -4 - assert cache.decr(:counter, 3) == -7 - assert cache.decr(:counter, 0) == -7 - - assert :counter |> cache.get() |> to_int() == -7 - - assert cache.decr(:counter, -1) == -6 - assert cache.decr(:counter, -1) == -5 - assert cache.decr(:counter, -2) == -3 - assert cache.decr(:counter, -3) == 0 - end - - test "decrements a counter by the given amount with default", %{cache: cache} do - assert cache.decr(:counter1, 1, default: 10) == 9 - assert cache.decr(:counter2, 2, default: 10) == 8 - assert cache.decr(:counter3, -2, default: 10) == 12 - end - - test "decrements a counter by the given amount ignoring the default", %{cache: cache} do - assert cache.decr(:counter) == -1 - assert cache.decr(:counter, 1, default: 10) == -2 - assert cache.decr(:counter, -1, default: 100) == -1 - end - - test "raises when amount is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected amount to be an integer", 
fn -> - cache.decr(:counter, "foo") - end - end - - test "raises when default is invalid", %{cache: cache} do - assert_raise ArgumentError, ~r"expected default: to be an integer", fn -> - cache.decr(:counter, 1, default: :invalid) - end - end - end - - ## Helpers - - defp to_int(data) when is_integer(data), do: data - defp to_int(data) when is_binary(data), do: String.to_integer(data) - end -end diff --git a/test/shared/cache/kv_error_test.exs b/test/shared/cache/kv_error_test.exs new file mode 100644 index 00000000..85851e61 --- /dev/null +++ b/test/shared/cache/kv_error_test.exs @@ -0,0 +1,196 @@ +defmodule Nebulex.Cache.KVErrorTest do + import Nebulex.CacheCase + + deftests do + import Nebulex.CacheCase, only: [assert_error_module: 2, assert_error_reason: 2] + + describe "put/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put_new("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.put_new!("hello", "world") + end + end + end + + describe "replace/3" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.replace("hello", "world") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "replace!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.replace!("hello", "world") + end + end + end + + describe "put_all/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, 
%Nebulex.Error{module: module, reason: reason}} = + cache.put_all(%{"apples" => 1, "bananas" => 3}) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_all!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.put_all!(other: 1) + end + end + end + + describe "put_new_all/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = + cache.put_new_all(%{"apples" => 1, "bananas" => 3}) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "put_new_all!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.put_new_all!(other: 1) + end + end + end + + describe "fetch/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.fetch(1) + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "fetch!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.fetch!("raise") + end + end + end + + describe "get/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.get("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "get!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.get!("raise") + end + end + end + + describe "delete/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.delete("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "delete!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.delete!("raise") + end + end + end + + describe 
"take/2" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.take("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "take!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.take!("raise") + end + end + end + + describe "has_key?/1" do + test "returns an error", %{cache: cache} = ctx do + assert {:error, %Nebulex.Error{module: module, reason: reason}} = cache.has_key?("error") + + assert_error_module(ctx, module) + assert_error_reason(ctx, reason) + end + end + + describe "update!/4" do + test "raises because put error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + + test "raises because fetch error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + end + + describe "incr!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.incr!(:raise) + end + end + end + + describe "decr!/3" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.decr!(:raise) + end + end + end + end +end diff --git a/test/shared/cache/kv_expiration_error_test.exs b/test/shared/cache/kv_expiration_error_test.exs new file mode 100644 index 00000000..9c9e042d --- /dev/null +++ b/test/shared/cache/kv_expiration_error_test.exs @@ -0,0 +1,21 @@ +defmodule Nebulex.Cache.KVExpirationErrorTest do + import Nebulex.CacheCase + + deftests do + describe "expire!/2" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.expire!(:raise, 100) + end + end + end + + describe "touch!/1" do + test "raises an error", %{cache: cache} do + assert_raise Nebulex.Error, fn -> + cache.touch!(:raise) + end + end + end + end +end diff --git a/test/shared/cache/kv_expiration_test.exs 
b/test/shared/cache/kv_expiration_test.exs new file mode 100644 index 00000000..77b9cced --- /dev/null +++ b/test/shared/cache/kv_expiration_test.exs @@ -0,0 +1,249 @@ +defmodule Nebulex.Cache.KVExpirationTest do + import Nebulex.CacheCase + + deftests do + describe "ttl option is given to" do + test "put", %{cache: cache} do + assert cache.put!("foo", "bar", ttl: 500) == :ok + assert cache.has_key?("foo") == {:ok, true} + + :ok = Process.sleep(600) + + assert cache.has_key?("foo") == {:ok, false} + end + + test "put_all", %{cache: cache} do + entries = [{0, nil} | for(x <- 1..3, do: {x, x})] + + assert cache.put_all!(entries, ttl: 1000) == :ok + + refute cache.get!(0) + + for x <- 1..3, do: assert(cache.fetch!(x) == x) + + :ok = Process.sleep(1200) + + for x <- 1..3, do: refute(cache.get!(x)) + end + + test "put_new_all", %{cache: cache} do + assert cache.put_new_all!(%{"apples" => 1, "bananas" => 3}, ttl: 1000) == true + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + + assert cache.put_new_all!(%{"apples" => 3, "oranges" => 1}) == false + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + refute cache.get!("oranges") + + :ok = Process.sleep(1200) + + refute cache.get!("apples") + refute cache.get!("bananas") + end + + test "take", %{cache: cache} do + :ok = cache.put!("foo", "bar", ttl: 500) + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: "foo"}} = cache.take("foo") + end + + test "take!", %{cache: cache} do + :ok = cache.put!(1, 1, ttl: 100) + + :ok = Process.sleep(500) + + assert_raise Nebulex.KeyError, ~r"key 1", fn -> + cache.take!(1) + end + end + + test "incr! 
(initializes default value if ttl is expired)", %{cache: cache} do + assert cache.incr!(:counter, 1, ttl: 200) == 1 + assert cache.incr!(:counter) == 2 + + :ok = Process.sleep(210) + + assert cache.incr!(:counter, 1, ttl: 200) == 1 + assert cache.incr!(:counter) == 2 + end + end + + describe "ttl!/1" do + test "returns the remaining ttl for the given key", %{cache: cache} do + assert cache.put!(:a, 1, ttl: 500) == :ok + assert cache.ttl!(:a) > 0 + assert cache.put!(:b, 2) == :ok + + :ok = Process.sleep(10) + + assert cache.ttl!(:a) > 0 + assert cache.ttl!(:b) == :infinity + + :ok = Process.sleep(600) + + assert {:error, %Nebulex.KeyError{key: :a}} = cache.ttl(:a) + assert cache.ttl!(:b) == :infinity + end + + test "raises Nebulex.KeyError if key does not exist", %{cache: cache, name: name} do + msg = ~r"key :non_existent not found in cache: #{inspect(name)}" + + assert_raise Nebulex.KeyError, msg, fn -> + cache.ttl!(:non_existent) + end + end + end + + describe "expire!/2" do + test "alters the expiration time for the given key", %{cache: cache} do + assert cache.put!(:a, 1, ttl: 500) == :ok + assert cache.ttl!(:a) > 0 + + assert cache.expire!(:a, 1000) == true + assert cache.ttl!(:a) > 100 + + assert cache.expire!(:a, :infinity) == true + assert cache.ttl!(:a) == :infinity + end + + test "returns false if key does not exist", %{cache: cache} do + assert cache.expire!(:non_existent, 100) == false + end + + test "raises when ttl is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> + cache.expire!(:a, "hello") + end + end + end + + describe "touch!/1" do + test "updates the last access time for the given entry", %{cache: cache} do + assert cache.put!(:touch, 1, ttl: 1000) == :ok + + :ok = Process.sleep(100) + + assert cache.touch!(:touch) == true + + :ok = Process.sleep(200) + + assert cache.touch!(:touch) == true + assert cache.fetch!(:touch) == 1 + + :ok = Process.sleep(1100) + + refute cache.get!(:touch) + end 
+ + test "returns false if key does not exist", %{cache: cache} do + assert cache.touch!(:non_existent) == false + end + end + + describe "expiration" do + test "single entry put with ttl", %{cache: cache} do + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.fetch!(1) == 11 + + for _ <- 3..1 do + assert cache.ttl!(1) > 0 + + Process.sleep(200) + end + + :ok = Process.sleep(500) + + assert {:error, %Nebulex.KeyError{key: 1}} = cache.ttl(1) + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + end + + test "multiple entries put with ttl", %{cache: cache} do + assert cache.put!(1, 11, ttl: 1000) == :ok + assert cache.fetch!(1) == 11 + + :ok = Process.sleep(10) + + assert cache.fetch!(1) == 11 + + :ok = Process.sleep(1100) + + refute cache.get!(1) + + ops = [ + put!: ["foo", "bar", [ttl: 1000]], + put_all!: [[{"foo", "bar"}], [ttl: 1000]] + ] + + for {action, args} <- ops do + assert apply(cache, action, args) == :ok + + :ok = Process.sleep(10) + + assert cache.fetch!("foo") == "bar" + + :ok = Process.sleep(1200) + + refute cache.get!("foo") + end + end + end + + describe "get_and_update with ttl" do + test "existing entry", %{cache: cache} do + assert cache.put!(1, 1, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + + :ok = Process.sleep(10) + + assert cache.get_and_update!(1, &cache.get_and_update_fun/1) == {1, 2} + assert cache.ttl!(1) == :infinity + + :ok = Process.sleep(1200) + + assert cache.fetch!(1) == 2 + end + end + + describe "update with ttl" do + test "existing entry", %{cache: cache} do + assert cache.put!(1, 1, ttl: 1000) == :ok + assert cache.ttl!(1) > 0 + + :ok = Process.sleep(10) + + assert cache.update!(1, 10, &Integer.to_string/1) == "1" + assert cache.ttl!(1) == :infinity + + :ok = Process.sleep(1200) + + assert cache.fetch!(1) == "1" + end + end + + describe "incr with ttl" do + test "increments a counter", %{cache: cache} do + assert cache.incr!(:counter, 1, ttl: 1000) == 1 + assert cache.ttl!(:counter) > 0 + + :ok = 
Process.sleep(1200) + + refute cache.get!(:counter) + end + + test "increments a counter and then set ttl", %{cache: cache} do + assert cache.incr!(:counter, 1) == 1 + assert cache.ttl!(:counter) == :infinity + + assert cache.expire!(:counter, 500) == true + + :ok = Process.sleep(600) + + refute cache.get!(:counter) + end + end + end +end diff --git a/test/shared/cache/kv_prop_test.exs b/test/shared/cache/kv_prop_test.exs new file mode 100644 index 00000000..1b25031c --- /dev/null +++ b/test/shared/cache/kv_prop_test.exs @@ -0,0 +1,33 @@ +defmodule Nebulex.Cache.KVPropTest do + import Nebulex.CacheCase + + deftests do + use ExUnitProperties + + describe "key/value entries" do + property "any term", %{cache: cache} do + check all term <- term() do + refute cache.get!(term) + + assert cache.replace!(term, term) == false + assert cache.put!(term, term) == :ok + assert cache.put_new!(term, term) == false + assert cache.fetch!(term) == term + + assert cache.replace!(term, "replaced") == true + assert cache.fetch!(term) == "replaced" + + assert cache.take!(term) == "replaced" + assert {:error, %Nebulex.KeyError{key: key}} = cache.take(term) + assert key == term + + assert cache.put_new!(term, term) == true + assert cache.fetch!(term) == term + + assert cache.delete!(term) == :ok + refute cache.get!(term) + end + end + end + end +end diff --git a/test/shared/cache/kv_test.exs b/test/shared/cache/kv_test.exs new file mode 100644 index 00000000..a068176d --- /dev/null +++ b/test/shared/cache/kv_test.exs @@ -0,0 +1,573 @@ +defmodule Nebulex.Cache.KVTest do + import Nebulex.CacheCase + + deftests do + describe "put/3" do + test "puts the given entry into the cache", %{cache: cache} do + for x <- 1..4, do: assert(cache.put(x, x) == :ok) + + assert cache.fetch!(1) == 1 + assert cache.fetch!(2) == 2 + + for x <- 3..4, do: assert(cache.put(x, x * x) == :ok) + + assert cache.fetch!(3) == 9 + assert cache.fetch!(4) == 16 + end + + test "puts a nil value", %{cache: cache} do + 
assert cache.put("foo", nil) == :ok + assert cache.fetch("foo") == {:ok, nil} + end + + test "puts a boolean value", %{cache: cache} do + assert cache.put(:boolean, true) == :ok + assert cache.fetch(:boolean) == {:ok, true} + + assert cache.put(:boolean, false) == :ok + assert cache.fetch(:boolean) == {:ok, false} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put("hello", "world", ttl: "1") + end + end + + test "with dynamic_cache", %{cache: cache} = ctx do + if name = Map.get(ctx, :name) do + assert cache.put(name, "foo", "bar", []) == :ok + assert cache.fetch!(name, "foo", []) == "bar" + assert cache.delete(name, "foo", []) == :ok + end + end + + test "with dynamic_cache raises an exception", %{cache: cache} do + assert_raise Nebulex.Error, ~r"could not lookup", fn -> + cache.put!(:invalid, "foo", "bar", []) + end + end + end + + describe "put!/3" do + test "puts the given entry into the cache", %{cache: cache} do + for x <- 1..4, do: assert(cache.put!(x, x) == :ok) + + assert cache.fetch!(1) == 1 + assert cache.fetch!(2) == 2 + + for x <- 3..4, do: assert(cache.put!(x, x * x) == :ok) + + assert cache.fetch!(3) == 9 + assert cache.fetch!(4) == 16 + end + end + + describe "put_new/3" do + test "puts the given entry into the cache if the key does not exist", %{cache: cache} do + assert cache.put_new("foo", "bar") == {:ok, true} + assert cache.fetch!("foo") == "bar" + end + + test "do nothing if key does exist already", %{cache: cache} do + :ok = cache.put("foo", "bar") + + assert cache.put_new("foo", "bar bar") == {:ok, false} + assert cache.fetch!("foo") == "bar" + end + + test "puts a new nil value", %{cache: cache} do + assert cache.put_new(:mykey, nil) == {:ok, true} + assert cache.fetch(:mykey) == {:ok, nil} + end + + test "puts a boolean value", %{cache: cache} do + assert cache.put_new(true, true) == {:ok, true} + assert cache.fetch(true) == {:ok, true} 
+ + assert cache.put_new(false, false) == {:ok, true} + assert cache.fetch(false) == {:ok, false} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_new("hello", "world", ttl: "1") + end + end + end + + describe "put_new!/3" do + test "puts the given entry into the cache if the key does not exist", %{cache: cache} do + assert cache.put_new!("hello", "world") == true + assert cache.fetch!("hello") == "world" + end + + test "returns false if the key does exist already", %{cache: cache} do + assert cache.put_new!("hello", "world") == true + assert cache.put_new!("hello", "world") == false + end + end + + describe "replace/3" do + test "replaces the cached entry with a new value", %{cache: cache} do + assert cache.replace("foo", "bar") == {:ok, false} + + assert cache.put("foo", "bar") == :ok + assert cache.fetch!("foo") == "bar" + + assert cache.replace("foo", "bar bar") == {:ok, true} + assert cache.fetch!("foo") == "bar bar" + end + + test "existing value with nil", %{cache: cache} do + :ok = cache.put("hello", "world") + + assert cache.replace("hello", nil) == {:ok, true} + assert cache.fetch("hello") == {:ok, nil} + end + + test "existing boolean value", %{cache: cache} do + :ok = cache.put(:boolean, true) + + assert cache.replace(:boolean, false) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, false} + + assert cache.replace(:boolean, true) == {:ok, true} + assert cache.fetch(:boolean) == {:ok, true} + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.replace("hello", "world", ttl: "1") + end + end + end + + describe "replace!/3" do + test "replaces the cached entry with a new value", %{cache: cache} do + assert cache.put("foo", "bar") == :ok + assert cache.replace!("foo", "bar bar") == true + assert cache.fetch!("foo") == "bar bar" + end + + test "returns 
false when the key is not found", %{cache: cache} do + assert cache.replace!("foo", "bar") == false + end + end + + describe "put_all/2" do + test "puts the given entries at once", %{cache: cache} do + assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == :ok + assert cache.put_all(blueberries: 2, strawberries: 5) == :ok + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + assert cache.fetch!(:blueberries) == 2 + assert cache.fetch!(:strawberries) == 5 + end + + test "empty list or map has not any effect", %{cache: cache} do + assert cache.put_all([]) == :ok + assert cache.put_all(%{}) == :ok + + assert count = cache.count_all() + assert cache.delete_all() == count + end + + test "puts the given entries using different data types at once", %{cache: cache} do + entries = + Enum.reduce(1..100, %{}, fn elem, acc -> + sample = %{ + elem => elem, + :"atom#{elem}" => elem, + "#{elem}" => elem, + {:tuple, elem} => elem, + <<100, elem>> => elem, + [elem] => elem, + true => true, + false => false + } + + Map.merge(acc, sample) + end) + + assert cache.put_all(entries) == :ok + + for {k, v} <- entries, do: assert(cache.fetch!(k) == v) + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") + end + end + end + + describe "put_all!/2" do + test "puts the given entries at once", %{cache: cache} do + assert cache.put_all!(%{"apples" => 1, "bananas" => 3}) == :ok + assert cache.put_all!(blueberries: 2, strawberries: 5) == :ok + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + assert cache.fetch!(:blueberries) == 2 + assert cache.fetch!(:strawberries) == 5 + end + end + + describe "put_new_all/2" do + test "puts the given entries only if none of the keys does exist already", %{cache: cache} do + assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}) == {:ok, true} + assert 
cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + + assert cache.put_new_all(%{"apples" => 3, "oranges" => 1}) == {:ok, false} + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + refute cache.get!("oranges") + end + + test "puts boolean values", %{cache: cache} do + assert cache.put_new_all(%{true => true, false => false}) == {:ok, true} + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + end + + test "raises when invalid option is given", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :ttl option", fn -> + cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: "1") + end + end + end + + describe "put_new_all!/2" do + test "puts the given entries only if none of the keys does exist already", %{cache: cache} do + assert cache.put_new_all!(%{"apples" => 1, "bananas" => 3}) == true + assert cache.fetch!("apples") == 1 + assert cache.fetch!("bananas") == 3 + end + + test "returns false if any of the keys does exist already", %{cache: cache} do + assert cache.put_new_all!(%{"apples" => 1, "bananas" => 3}) == true + assert cache.put_new_all!(%{"apples" => 3, "oranges" => 1}) == false + end + end + + describe "fetch/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.fetch(x) == {:ok, x} + end + end + + test "returns {:error, %Nebulex.KeyError{}} if key does not exist in cache", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: "non-existent"}} = cache.fetch("non-existent") + end + end + + describe "fetch!/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.fetch!(x) == x + end + end + + test "raises when the key does not exist in cache", %{cache: cache, name: name} do + msg = ~r"key \"non-existent\" not found in cache: #{inspect(name)}" + + assert_raise Nebulex.KeyError, msg, fn -> + cache.fetch!("non-existent") + end + end + end + + describe 
"get/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.get(x) == {:ok, x} + end + end + + test "returns default if key does not exist in cache", %{cache: cache} do + assert cache.get("non-existent") == {:ok, nil} + assert cache.get("non-existent", "default") == {:ok, "default"} + end + end + + describe "get!/2" do + test "retrieves a cached entry", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.get!(x) == x + end + end + + test "returns default if key does not exist in cache", %{cache: cache} do + refute cache.get!("non-existent") + assert cache.get!("non-existent", "default") == "default" + end + end + + describe "delete/2" do + test "deletes the given key", %{cache: cache} do + for x <- 1..3, do: cache.put(x, x * 2) + + assert cache.fetch!(1) == 2 + assert cache.delete(1) == :ok + refute cache.get!(1) + + assert cache.fetch!(2) == 4 + assert cache.fetch!(3) == 6 + + assert cache.delete(:non_existent) == :ok + refute cache.get!(:non_existent) + end + + test "deletes boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.fetch!(true) == true + assert cache.fetch!(false) == false + assert cache.fetch!(nil) == nil + + assert cache.delete(true) == :ok + assert cache.delete(false) == :ok + assert cache.delete(nil) == :ok + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end + end + + describe "delete!/2" do + test "deletes the given key", %{cache: cache} do + assert cache.put("foo", "bar") == :ok + + assert cache.fetch!("foo") == "bar" + assert cache.delete!("foo") == :ok + refute cache.get!("foo") + end + end + + describe "take/2" do + test "returns the given key and removes it from cache", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.take(x) == {:ok, x} + assert {:error, %Nebulex.KeyError{key: ^x}} = cache.take(x) + end + end + + test 
"returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.take(true) == {:ok, true} + assert cache.take(false) == {:ok, false} + assert cache.take(nil) == {:ok, nil} + + refute cache.get!(true) + refute cache.get!(false) + refute cache.get!(nil) + end + + test "returns an error if the key does not exist in cache", %{cache: cache} do + assert {:error, %Nebulex.KeyError{key: :non_existent}} = cache.take(:non_existent) + assert {:error, %Nebulex.KeyError{key: nil}} = cache.take(nil) + end + end + + describe "take!/2" do + test "returns the given key and removes it from cache", %{cache: cache} do + assert cache.put(1, 1) == :ok + assert cache.take!(1) == 1 + assert cache.get!(1) == nil + end + + test "raises when the key does not exist in cache", %{cache: cache, name: name} do + msg = ~r"key \"non-existent\" not found in cache: #{inspect(name)}" + + assert_raise Nebulex.KeyError, msg, fn -> + cache.take!("non-existent") + end + end + end + + describe "has_key?/1" do + test "returns true if key does exist in cache", %{cache: cache} do + for x <- 1..5 do + :ok = cache.put(x, x) + + assert cache.has_key?(x) == {:ok, true} + end + end + + test "returns boolean and nil values", %{cache: cache} do + :ok = cache.put_all(true: true, false: false, nil: nil) + + assert cache.has_key?(true) == {:ok, true} + assert cache.has_key?(false) == {:ok, true} + assert cache.has_key?(nil) == {:ok, true} + end + + test "returns false if key does not exist in cache", %{cache: cache} do + assert cache.has_key?(:non_existent) == {:ok, false} + assert cache.has_key?(nil) == {:ok, false} + end + end + + describe "update!/4" do + test "updates an entry under a key applying a function on the value", %{cache: cache} do + :ok = cache.put("foo", "123") + :ok = cache.put("bar", "foo") + + assert cache.update!("foo", 1, &String.to_integer/1) == 123 + assert cache.update!("bar", "init", &String.to_atom/1) == :foo + end + + test "creates 
the entry with the default value if key does not exist", %{cache: cache} do + assert cache.update!("foo", "123", &Integer.to_string/1) == "123" + end + + test "updates existing value with nil", %{cache: cache} do + assert cache.update!("bar", nil, &Integer.to_string/1) == nil + assert cache.fetch!("bar") == nil + end + + test "raises because the cache is not started", %{cache: cache} do + :ok = cache.stop() + + assert_raise Nebulex.Error, fn -> + cache.update!("error", 1, &String.to_integer/1) + end + end + end + + describe "incr/3" do + test "increments a counter by the given amount", %{cache: cache} do + assert cache.incr(:counter) == {:ok, 1} + assert cache.incr(:counter) == {:ok, 2} + assert cache.incr(:counter, 2) == {:ok, 4} + assert cache.incr(:counter, 3) == {:ok, 7} + assert cache.incr(:counter, 0) == {:ok, 7} + + assert :counter |> cache.fetch!() |> to_int() == 7 + + assert cache.incr(:counter, -1) == {:ok, 6} + assert cache.incr(:counter, -1) == {:ok, 5} + assert cache.incr(:counter, -2) == {:ok, 3} + assert cache.incr(:counter, -3) == {:ok, 0} + end + + test "increments a counter by the given amount with default", %{cache: cache} do + assert cache.incr(:counter1, 1, default: 10) == {:ok, 11} + assert cache.incr(:counter2, 2, default: 10) == {:ok, 12} + assert cache.incr(:counter3, -2, default: 10) == {:ok, 8} + end + + test "increments a counter by the given amount ignoring the default", %{cache: cache} do + assert cache.incr(:counter) == {:ok, 1} + assert cache.incr(:counter, 1, default: 10) == {:ok, 2} + assert cache.incr(:counter, -1, default: 100) == {:ok, 1} + end + + test "raises when amount is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for amount argument", fn -> + cache.incr(:counter, "foo") + end + end + + test "raises when default is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :default option: expected integer", fn -> + cache.incr(:counter, 1, default: :invalid) + end + end + 
end + + describe "incr!/3" do + test "increments a counter by the given amount", %{cache: cache} do + assert cache.incr!(:counter) == 1 + assert cache.incr!(:counter) == 2 + assert cache.incr!(:counter, 2) == 4 + assert cache.incr!(:counter, 3) == 7 + assert cache.incr!(:counter, 0) == 7 + + assert :counter |> cache.fetch!() |> to_int() == 7 + + assert cache.incr!(:counter, -1) == 6 + assert cache.incr!(:counter, -1) == 5 + assert cache.incr!(:counter, -2) == 3 + assert cache.incr!(:counter, -3) == 0 + end + end + + describe "decr/3" do + test "decrements a counter by the given amount", %{cache: cache} do + assert cache.decr(:counter) == {:ok, -1} + assert cache.decr(:counter) == {:ok, -2} + assert cache.decr(:counter, 2) == {:ok, -4} + assert cache.decr(:counter, 3) == {:ok, -7} + assert cache.decr(:counter, 0) == {:ok, -7} + + assert :counter |> cache.fetch!() |> to_int() == -7 + + assert cache.decr(:counter, -1) == {:ok, -6} + assert cache.decr(:counter, -1) == {:ok, -5} + assert cache.decr(:counter, -2) == {:ok, -3} + assert cache.decr(:counter, -3) == {:ok, 0} + end + + test "decrements a counter by the given amount with default", %{cache: cache} do + assert cache.decr(:counter1, 1, default: 10) == {:ok, 9} + assert cache.decr(:counter2, 2, default: 10) == {:ok, 8} + assert cache.decr(:counter3, -2, default: 10) == {:ok, 12} + end + + test "decrements a counter by the given amount ignoring the default", %{cache: cache} do + assert cache.decr(:counter) == {:ok, -1} + assert cache.decr(:counter, 1, default: 10) == {:ok, -2} + assert cache.decr(:counter, -1, default: 100) == {:ok, -1} + end + + test "raises when amount is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for amount argument", fn -> + cache.decr(:counter, "foo") + end + end + + test "raises when default is invalid", %{cache: cache} do + assert_raise ArgumentError, ~r"invalid value for :default option: expected integer", fn -> + cache.decr(:counter, 1, default: :invalid) + 
end + end + end + + describe "decr!/3" do + test "decrements a counter by the given amount", %{cache: cache} do + assert cache.decr!(:counter) == -1 + assert cache.decr!(:counter) == -2 + assert cache.decr!(:counter, 2) == -4 + assert cache.decr!(:counter, 3) == -7 + assert cache.decr!(:counter, 0) == -7 + + assert :counter |> cache.fetch!() |> to_int() == -7 + + assert cache.decr!(:counter, -1) == -6 + assert cache.decr!(:counter, -1) == -5 + assert cache.decr!(:counter, -2) == -3 + assert cache.decr!(:counter, -3) == 0 + end + end + + ## Helpers + + defp to_int(data) when is_integer(data), do: data + defp to_int(data) when is_binary(data), do: String.to_integer(data) + end +end diff --git a/test/shared/cache/persistence_error_test.exs b/test/shared/cache/persistence_error_test.exs index cd246e53..e8e4e28e 100644 --- a/test/shared/cache/persistence_error_test.exs +++ b/test/shared/cache/persistence_error_test.exs @@ -2,12 +2,43 @@ defmodule Nebulex.Cache.PersistenceErrorTest do import Nebulex.CacheCase deftests "persistence error" do - test "dump: invalid path", %{cache: cache} do - assert cache.dump("/invalid/path") == {:error, :enoent} + test "dump/2 fails because invalid path", %{cache: cache} do + assert cache.dump("/invalid/path") == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + opts: [cache: cache], + reason: %File.Error{action: "open", path: "/invalid/path", reason: :enoent} + }} end - test "load: invalid path", %{cache: cache} do - assert cache.load("wrong_file") == {:error, :enoent} + test "dump!/2 raises because invalid path", %{cache: cache} do + err = """ + the following exception occurred in the cache #{inspect(cache)}. 
+ + ** (File.Error) could not open \"/invalid/path\": no such file or directory + + """ + + assert_raise Nebulex.Error, err, fn -> + cache.dump!("/invalid/path") + end + end + + test "load/2 error because invalid path", %{cache: cache} do + assert cache.load("wrong_file") == + {:error, + %Nebulex.Error{ + module: Nebulex.Error, + opts: [cache: cache], + reason: %File.Error{action: "open", path: "wrong_file", reason: :enoent} + }} + end + + test "load!/2 raises because invalid path", %{cache: cache} do + assert_raise Nebulex.Error, ~r"could not open \"wrong_file\": no such file", fn -> + cache.load!("wrong_file") + end end end end diff --git a/test/shared/cache/persistence_test.exs b/test/shared/cache/persistence_test.exs index 5d77b954..e8fea6ea 100644 --- a/test/shared/cache/persistence_test.exs +++ b/test/shared/cache/persistence_test.exs @@ -7,31 +7,31 @@ defmodule Nebulex.Cache.PersistenceTest do path = "#{tmp}/#{cache}" try do - assert cache.count_all() == 0 + assert cache.count_all!() == 0 assert cache.dump(path) == :ok assert File.exists?(path) assert cache.load(path) == :ok - assert cache.count_all() == 0 + assert cache.count_all!() == 0 count = 100 unexpired = for x <- 1..count, into: %{}, do: {x, x} assert cache.put_all(unexpired) == :ok assert cache.put_all(%{a: 1, b: 2}, ttl: 10) == :ok - assert cache.put_all(%{c: 1, d: 2}, ttl: 3_600_000) == :ok - assert cache.count_all() == count + 4 + assert cache.put_all(%{c: 1, d: 2}, ttl: :timer.hours(1)) == :ok + assert cache.count_all!() == count + 4 :ok = Process.sleep(1000) assert cache.dump(path) == :ok assert File.exists?(path) - assert cache.delete_all() == count + 4 - assert cache.count_all() == 0 + assert cache.delete_all!() == count + 4 + assert cache.count_all!() == 0 assert cache.load(path) == :ok - assert cache.get_all(1..count) == unexpired - assert cache.get_all([:a, :b, :c, :d]) == %{c: 1, d: 2} - assert cache.count_all() == count + 2 + assert cache.get_all!({:in, Enum.to_list(1..count)}) |> 
Map.new() == unexpired + assert cache.get_all!({:in, [:a, :b, :c, :d]}) |> Map.new() == %{c: 1, d: 2} + assert cache.count_all!() == count + 2 after File.rm_rf!(path) end diff --git a/test/shared/cache/queryable_test.exs b/test/shared/cache/queryable_test.exs index 22d9c000..255f3890 100644 --- a/test/shared/cache/queryable_test.exs +++ b/test/shared/cache/queryable_test.exs @@ -4,88 +4,173 @@ defmodule Nebulex.Cache.QueryableTest do deftests do import Nebulex.CacheCase - describe "all/2" do - test "returns all keys in cache", %{cache: cache} do + describe "get_all/2" do + test "ok: returns all keys in cache", %{cache: cache} do set1 = cache_put(cache, 1..50) set2 = cache_put(cache, 51..100) - for x <- 1..100, do: assert(cache.get(x) == x) + for x <- 1..100, do: assert(cache.fetch!(x) == x) expected = set1 ++ set2 - assert :lists.usort(cache.all()) == expected + assert cache.get_all!() |> :lists.usort() == List.zip([expected, expected]) + assert cache.get_all!(nil, return: :keys) |> :lists.usort() == expected + assert cache.get_all!(nil, return: :values) |> :lists.usort() == expected set3 = Enum.to_list(20..60) - :ok = Enum.each(set3, &cache.delete(&1)) + :ok = Enum.each(set3, &cache.delete!(&1)) expected = :lists.usort(expected -- set3) - assert :lists.usort(cache.all()) == expected + assert cache.get_all!() |> :lists.usort() == List.zip([expected, expected]) + assert cache.get_all!(nil, return: :keys) |> :lists.usort() == expected + assert cache.get_all!(nil, return: :values) |> :lists.usort() == expected + end + + test "error: query error", %{cache: cache} = test_opts do + on_error = test_opts[:on_error] || fn %Nebulex.Error{reason: :invalid_query} -> :ok end + + assert {:error, reason} = cache.get_all(:invalid) + on_error.(reason) end end describe "stream/2" do @entries for x <- 1..10, into: %{}, do: {x, x * 2} - test "returns all keys in cache", %{cache: cache} do + test "ok: returns all keys in cache", %{cache: cache} do :ok = cache.put_all(@entries) assert 
nil - |> cache.stream() + |> cache.stream!(return: :keys) |> Enum.to_list() |> :lists.usort() == Map.keys(@entries) end - test "returns all values in cache", %{cache: cache} do + test "ok: returns all values in cache", %{cache: cache} do :ok = cache.put_all(@entries) assert nil - |> cache.stream(return: :value, page_size: 3) + |> cache.stream!(return: :values, page_size: 3) |> Enum.to_list() |> :lists.usort() == Map.values(@entries) end - test "returns all key/value pairs in cache", %{cache: cache} do + test "ok: returns all key/value pairs in cache", %{cache: cache} do :ok = cache.put_all(@entries) assert nil - |> cache.stream(return: {:key, :value}, page_size: 3) + |> cache.stream!(return: :entries, page_size: 3) |> Enum.to_list() |> :lists.usort() == :maps.to_list(@entries) end - test "raises when query is invalid", %{cache: cache} do - assert_raise Nebulex.QueryError, fn -> + test "error: raises when query is invalid", %{cache: cache} do + assert_raise Nebulex.Error, ~r"invalid query", fn -> :invalid_query - |> cache.stream() + |> cache.stream!() |> Enum.to_list() end end end describe "delete_all/2" do - test "evicts all entries in the cache", %{cache: cache} do + test "ok: evicts all entries in the cache", %{cache: cache} do Enum.each(1..2, fn _ -> entries = cache_put(cache, 1..50) - assert cache.all() |> :lists.usort() |> length() == length(entries) + assert cache.get_all!() |> :lists.usort() |> length() == length(entries) - cached = cache.count_all() - assert cache.delete_all() == cached - assert cache.count_all() == 0 + cached = cache.count_all!() + assert cache.delete_all!() == cached + assert cache.count_all!() == 0 end) end + + test "error: query error", %{cache: cache} = test_opts do + on_error = test_opts[:on_error] || fn %Nebulex.Error{reason: :invalid_query} -> :ok end + + assert {:error, reason} = cache.delete_all(:invalid) + on_error.(reason) + end end describe "count_all/2" do - test "returns the total number of cached entries", %{cache: cache} do 
+ test "ok: returns the total number of cached entries", %{cache: cache} do for x <- 1..100, do: cache.put(x, x) - total = cache.all() |> length() - assert cache.count_all() == total + total = cache.get_all!() |> length() + assert cache.count_all!() == total + + for x <- 1..50, do: cache.delete!(x) + total = cache.get_all!() |> length() + assert cache.count_all!() == total + + for x <- 51..60, do: assert(cache.fetch!(x) == x) + end + + test "error: query error", %{cache: cache} = test_opts do + on_error = test_opts[:on_error] || fn %Nebulex.Error{reason: :invalid_query} -> :ok end - for x <- 1..50, do: cache.delete(x) - total = cache.all() |> length() - assert cache.count_all() == total + assert {:error, reason} = cache.count_all(:invalid) + on_error.(reason) + end + end + + describe "all!/2 - {:in, keys}" do + test "ok: returns the entries associated to the requested keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3) == :ok + + query = {:in, [:a, :b, :c]} + + assert cache.get_all!(query) |> Map.new() == %{a: 1, c: 3} + assert cache.get_all!(query, return: :keys) |> :lists.usort() == [:a, :c] + assert cache.get_all!(query, return: :values) |> :lists.usort() == [1, 3] + + assert cache.delete_all!() == 2 + end + + test "ok: returns an empty list when none of the given keys is in cache", %{cache: cache} do + assert cache.get_all!({:in, ["foo", "bar", 1, :a]}) == [] + end + + test "ok: returns an empty list when the given key list is empty", %{cache: cache} do + assert cache.get_all!({:in, []}) == [] + end + end + + describe "count_all!/2 - {:in, keys})" do + test "ok: returns the count of the requested keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3, d: 4) == :ok + + assert cache.count_all!({:in, [:a, :b, :c]}) == 2 + + assert cache.delete_all!() == 3 + end + + test "ok: returns 0 when none of the given keys is in cache", %{cache: cache} do + assert cache.count_all!({:in, ["foo", "bar", 1, :a]}) == 0 + end + + test "ok: returns 0 when the given 
key list is empty", %{cache: cache} do + assert cache.count_all!({:in, []}) == 0 + end + end + + describe "delete_all!/2 - {:in, keys}" do + test "ok: returns the count of the deleted keys", %{cache: cache} do + assert cache.put_all(a: 1, c: 3, d: 4) == :ok + + assert cache.delete_all!({:in, [:a, :b, :c]}) == 2 + assert cache.get_all!() == [d: 4] + + assert cache.delete_all!() == 1 + assert cache.get_all!() == [] + end + + test "ok: returns 0 when none of the given keys is in cache", %{cache: cache} do + assert cache.delete_all!({:in, ["foo", "bar", 1, :a]}) == 0 + end - for x <- 51..60, do: assert(cache.get(x) == x) + test "ok: returns 0 when the given key list is empty", %{cache: cache} do + assert cache.delete_all!({:in, []}) == 0 end end end diff --git a/test/shared/cache/transaction_test.exs b/test/shared/cache/transaction_test.exs index 7a7d9343..9383c755 100644 --- a/test/shared/cache/transaction_test.exs +++ b/test/shared/cache/transaction_test.exs @@ -4,57 +4,65 @@ defmodule Nebulex.Cache.TransactionTest do deftests do describe "transaction" do test "ok: single transaction", %{cache: cache} do - refute cache.transaction(fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - cache.get(1) - end - end) + assert cache.transaction(fn -> + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + cache.get!(1) + end) == {:ok, nil} end test "ok: nested transaction", %{cache: cache} do - refute cache.transaction( - [keys: [1]], + assert cache.transaction( fn -> cache.transaction( - [keys: [2]], fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - cache.get(1) - end - end + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + cache.get!(1) + end, + keys: [2] ) - end - ) + end, + keys: [1] + ) == {:ok, {:ok, nil}} end test "ok: single transaction with read and write operations", %{cache: cache} do assert cache.put(:test, ["old value"]) == :ok - 
assert cache.get(:test) == ["old value"] + assert cache.fetch!(:test) == ["old value"] assert cache.transaction( - [keys: [:test]], fn -> - ["old value"] = value = cache.get(:test) - :ok = cache.put(:test, ["new value" | value]) - cache.get(:test) - end - ) == ["new value", "old value"] + ["old value"] = value = cache.fetch!(:test) + + :ok = cache.put!(:test, ["new value" | value]) + + cache.fetch!(:test) + end, + keys: [:test] + ) == {:ok, ["new value", "old value"]} - assert cache.get(:test) == ["new value", "old value"] + assert cache.fetch!(:test) == ["new value", "old value"] end - test "raises exception", %{cache: cache} do + test "error: exception is raised", %{cache: cache} do assert_raise MatchError, fn -> cache.transaction(fn -> - with :ok <- cache.put(1, 11), - 11 <- cache.get!(1), - :ok <- cache.delete(1) do - :ok = cache.get(1) - end + :ok = cache.put!(1, 11) + + 11 = cache.fetch!(1) + + :ok = cache.delete!(1) + + :ok = cache.get(1) end) end end @@ -66,34 +74,39 @@ defmodule Nebulex.Cache.TransactionTest do _ = cache.put_dynamic_cache(name) cache.transaction( - [keys: [key], retries: 1], fn -> :ok = cache.put(key, true) - Process.sleep(2000) - end + + Process.sleep(1100) + end, + keys: [key], + retries: 1 ) end) :ok = Process.sleep(200) - assert_raise RuntimeError, "transaction aborted", fn -> - cache.transaction( - [keys: [key], retries: 1], - fn -> - cache.get(key) - end - ) + assert_raise Nebulex.Error, ~r"cache #{inspect(name)} has aborted a transaction", fn -> + {:error, %Nebulex.Error{} = reason} = + cache.transaction( + fn -> cache.get(key) end, + keys: [key], + retries: 1 + ) + + raise reason end end end describe "in_transaction?" 
do test "returns true if calling process is already within a transaction", %{cache: cache} do - refute cache.in_transaction?() + assert cache.in_transaction?() == {:ok, false} cache.transaction(fn -> - :ok = cache.put(1, 11, return: :key) - true = cache.in_transaction?() + :ok = cache.put(1, 11) + + assert cache.in_transaction?() == {:ok, true} end) end end diff --git a/test/shared/cache_test.exs b/test/shared/cache_test_case.exs similarity index 58% rename from test/shared/cache_test.exs rename to test/shared/cache_test_case.exs index 792323ca..09130d58 100644 --- a/test/shared/cache_test.exs +++ b/test/shared/cache_test_case.exs @@ -1,18 +1,17 @@ -defmodule Nebulex.CacheTest do +defmodule Nebulex.CacheTestCase do @moduledoc """ Shared Tests """ defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest - use Nebulex.Cache.EntryExpirationTest - use Nebulex.Cache.EntryPropTest + use Nebulex.Cache.KVTest + use Nebulex.Cache.KVExpirationTest + use Nebulex.Cache.KVPropTest use Nebulex.Cache.QueryableTest use Nebulex.Cache.TransactionTest use Nebulex.Cache.PersistenceTest use Nebulex.Cache.PersistenceErrorTest - use Nebulex.Cache.DeprecatedTest end end end diff --git a/test/shared/local_test.exs b/test/shared/local_test.exs deleted file mode 100644 index 768679a0..00000000 --- a/test/shared/local_test.exs +++ /dev/null @@ -1,526 +0,0 @@ -defmodule Nebulex.LocalTest do - import Nebulex.CacheCase - - deftests do - import Ex2ms - import Nebulex.CacheCase - - alias Nebulex.{Adapter, Entry} - - describe "error" do - test "on init because invalid backend", %{cache: cache} do - assert {:error, {%RuntimeError{message: msg}, _}} = - cache.start_link(name: :invalid_backend, backend: :xyz) - - assert msg == - "expected backend: option to be one of the supported " <> - "backends [:ets, :shards], got: :xyz" - end - - test "because cache is stopped", %{cache: cache} do - :ok = cache.stop() - - msg = ~r"could not lookup Nebulex cache" - assert_raise 
Nebulex.RegistryLookupError, msg, fn -> cache.put(1, 13) end - assert_raise Nebulex.RegistryLookupError, msg, fn -> cache.get(1) end - assert_raise Nebulex.RegistryLookupError, msg, fn -> cache.delete(1) end - end - end - - describe "entry:" do - test "get_and_update", %{cache: cache} do - fun = fn - nil -> {nil, 1} - val -> {val, val * 2} - end - - assert cache.get_and_update(1, fun) == {nil, 1} - - assert cache.get_and_update(1, &{&1, &1 * 2}) == {1, 2} - assert cache.get_and_update(1, &{&1, &1 * 3}) == {2, 6} - assert cache.get_and_update(1, &{&1, nil}) == {6, 6} - assert cache.get(1) == 6 - assert cache.get_and_update(1, fn _ -> :pop end) == {6, nil} - assert cache.get_and_update(1, fn _ -> :pop end) == {nil, nil} - assert cache.get_and_update(3, &{&1, 3}) == {nil, 3} - - assert_raise ArgumentError, fn -> - cache.get_and_update(1, fn _ -> :other end) - end - end - - test "incr and update", %{cache: cache} do - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - - assert cache.get_and_update(:counter, &{&1, &1 * 2}) == {2, 4} - assert cache.incr(:counter) == 5 - - assert cache.update(:counter, 1, &(&1 * 2)) == 10 - assert cache.incr(:counter, -10) == 0 - - assert cache.put("foo", "bar") == :ok - - assert_raise ArgumentError, fn -> - cache.incr("foo") - end - end - - test "incr with ttl", %{cache: cache} do - assert cache.incr(:counter_with_ttl, 1, ttl: 1000) == 1 - assert cache.incr(:counter_with_ttl) == 2 - assert cache.get(:counter_with_ttl) == 2 - - :ok = Process.sleep(1010) - refute cache.get(:counter_with_ttl) - - assert cache.incr(:counter_with_ttl, 1, ttl: 5000) == 1 - assert cache.ttl(:counter_with_ttl) > 1000 - - assert cache.expire(:counter_with_ttl, 500) - :ok = Process.sleep(600) - refute cache.get(:counter_with_ttl) - end - - test "incr existing entry", %{cache: cache} do - assert cache.put(:counter, 0) == :ok - assert cache.incr(:counter) == 1 - assert cache.incr(:counter, 2) == 3 - end - end - - describe "queryable:" do - test 
"ETS match_spec queries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5, &(&1 * 2)) - _ = new_generation(cache, name) - values = values ++ cache_put(cache, 6..10, &(&1 * 2)) - - assert nil - |> cache.stream(page_size: 3, return: :value) - |> Enum.to_list() - |> :lists.usort() == values - - {_, expected} = Enum.split(values, 5) - - test_ms = - fun do - {_, _, value, _, _} when value > 10 -> value - end - - for action <- [:all, :stream] do - assert all_or_stream(cache, action, test_ms, page_size: 3, return: :value) == expected - - msg = ~r"invalid match spec" - - assert_raise Nebulex.QueryError, msg, fn -> - all_or_stream(cache, action, :invalid_query) - end - end - end - - test "expired and unexpired queries", %{cache: cache} do - for action <- [:all, :stream] do - expired = cache_put(cache, 1..5, &(&1 * 2), ttl: 1000) - unexpired = cache_put(cache, 6..10, &(&1 * 2)) - - all = expired ++ unexpired - - opts = [page_size: 3, return: :value] - - assert all_or_stream(cache, action, nil, opts) == all - assert all_or_stream(cache, action, :unexpired, opts) == all - assert all_or_stream(cache, action, :expired, opts) == [] - - :ok = Process.sleep(1100) - - assert all_or_stream(cache, action, :unexpired, opts) == unexpired - assert all_or_stream(cache, action, :expired, opts) == expired - end - end - - test "all entries", %{cache: cache} do - assert cache.put_all([a: 1, b: 2, c: 3], ttl: 5000) == :ok - - assert all = cache.all(:unexpired, return: :entry) - assert length(all) == 3 - - for %Entry{} = entry <- all do - assert Entry.ttl(entry) > 0 - end - end - - test "delete all expired and unexpired entries", %{cache: cache} do - _ = cache_put(cache, 1..5, & &1, ttl: 1500) - _ = cache_put(cache, 6..10) - - assert cache.delete_all(:expired) == 0 - assert cache.count_all(:expired) == 0 - - :ok = Process.sleep(1600) - - assert cache.delete_all(:expired) == 5 - assert cache.count_all(:expired) == 0 - assert cache.count_all(:unexpired) == 5 - - assert 
cache.delete_all(:unexpired) == 5 - assert cache.count_all(:unexpired) == 0 - assert cache.count_all() == 0 - end - - test "delete all matched entries", %{cache: cache, name: name} do - values = cache_put(cache, 1..5) - - _ = new_generation(cache, name) - - values = values ++ cache_put(cache, 6..10) - - assert cache.count_all() == 10 - - test_ms = - fun do - {_, _, value, _, _} when rem(value, 2) == 0 -> value - end - - {expected, rem} = Enum.split_with(values, &(rem(&1, 2) == 0)) - - assert cache.count_all(test_ms) == 5 - assert cache.all(test_ms) |> Enum.sort() == Enum.sort(expected) - - assert cache.delete_all(test_ms) == 5 - assert cache.count_all(test_ms) == 0 - assert cache.all() |> Enum.sort() == Enum.sort(rem) - end - - test "delete all entries with special query {:in, keys}", %{cache: cache} do - entries = for x <- 1..10, into: %{}, do: {x, x} - - :ok = cache.put_all(entries) - - assert cache.count_all() == 10 - - assert cache.delete_all({:in, [2, 4, 6, 8, 10, 12]}) == 5 - - assert cache.count_all() == 5 - assert cache.all() |> Enum.sort() == [1, 3, 5, 7, 9] - end - - test "delete all entries with special query {:in, keys} (nested tuples)", %{cache: cache} do - [ - {1, {:foo, "bar"}}, - {2, {nil, nil}}, - {3, {nil, {nil, nil}}}, - {4, {nil, {nil, nil, {nil, nil}}}}, - {5, {:a, {:b, {:c, {:d, {:e, "f"}}}}}}, - {6, {:a, :b, {:c, :d, {:e, :f, {:g, :h, {:i, :j, "k"}}}}}} - ] - |> Enum.each(fn {k, v} -> - :ok = cache.put(k, v) - - assert cache.count_all() == 1 - assert cache.delete_all({:in, [k]}) == 1 - assert cache.count_all() == 0 - end) - end - end - - describe "older generation hitted on" do - test "put/3 (key is removed from older generation)", %{cache: cache, name: name} do - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - :ok = cache.put("foo", "bar bar") - - assert get_from_new(cache, name, "foo") == "bar bar" - refute 
get_from_old(cache, name, "foo") - end - - test "put_new/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put_new("foo", "bar") == true - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.put_new("foo", "bar") == false - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - _ = new_generation(cache, name) - - assert cache.put_new("foo", "bar") == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - end - - test "replace/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.replace("foo", "bar bar") == false - - :ok = cache.put("foo", "bar") - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.replace("foo", "bar bar") == true - - assert get_from_new(cache, name, "foo") == "bar bar" - refute get_from_old(cache, name, "foo") - - _ = new_generation(cache, name) - _ = new_generation(cache, name) - - assert cache.replace("foo", "bar bar") == false - end - - test "put_all/2 (keys are removed from older generation)", %{cache: cache, name: name} do - entries = Enum.map(1..100, &{{:key, &1}, &1}) - - :ok = cache.put_all(entries) - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - :ok = cache.put_all(entries) - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "put_new_all/2 (fallback to older generation)", %{cache: cache, name: name} do - entries = Enum.map(1..100, &{&1, &1}) - - assert cache.put_new_all(entries) == true - - _ = new_generation(cache, name) - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert 
get_from_old(cache, name, k) == v - end) - - assert cache.put_new_all(entries) == false - - Enum.each(entries, fn {k, v} -> - refute get_from_new(cache, name, k) - assert get_from_old(cache, name, k) == v - end) - - _ = new_generation(cache, name) - - assert cache.put_new_all(entries) == true - - Enum.each(entries, fn {k, v} -> - assert get_from_new(cache, name, k) == v - refute get_from_old(cache, name, k) - end) - end - - test "expire/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, "foo") - assert get_from_old(cache, name, "foo") == "bar" - - assert cache.expire("foo", 200) == true - - assert get_from_new(cache, name, "foo") == "bar" - refute get_from_old(cache, name, "foo") - - :ok = Process.sleep(210) - - refute cache.get("foo") - end - - test "incr/3 (fallback to older generation)", %{cache: cache, name: name} do - assert cache.put(:counter, 0, ttl: 200) == :ok - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, :counter) - assert get_from_old(cache, name, :counter) == 0 - - assert cache.incr(:counter) == 1 - assert cache.incr(:counter) == 2 - - assert get_from_new(cache, name, :counter) == 2 - refute get_from_old(cache, name, :counter) - - :ok = Process.sleep(210) - - assert cache.incr(:counter) == 1 - end - - test "all/2 (no duplicates)", %{cache: cache, name: name} do - entries = for x <- 1..20, into: %{}, do: {x, x} - keys = Map.keys(entries) |> Enum.sort() - - :ok = cache.put_all(entries) - - assert cache.count_all() == 20 - assert cache.all() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - :ok = cache.put_all(entries) - - assert cache.count_all() == 20 - assert cache.all() |> Enum.sort() == keys - - _ = new_generation(cache, name) - - more_entries = for x <- 10..30, into: %{}, do: {x, x} - more_keys = Map.keys(more_entries) |> Enum.sort() - - :ok = cache.put_all(more_entries) - - assert 
cache.count_all() == 30 - assert cache.all() |> Enum.sort() == (keys ++ more_keys) |> Enum.uniq() - - _ = new_generation(cache, name) - - assert cache.count_all() == 21 - assert cache.all() |> Enum.sort() == more_keys - end - end - - describe "generation" do - test "created with unexpired entries", %{cache: cache, name: name} do - assert cache.put("foo", "bar") == :ok - assert cache.get("foo") == "bar" - assert cache.ttl("foo") == :infinity - - _ = new_generation(cache, name) - - assert cache.get("foo") == "bar" - end - - test "lifecycle", %{cache: cache, name: name} do - # should be empty - refute cache.get(1) - - # set some entries - for x <- 1..2, do: cache.put(x, x) - - # fetch one entry from new generation - assert cache.get(1) == 1 - - # fetch non-existent entries - refute cache.get(3) - refute cache.get(:non_existent) - - # create a new generation - _ = new_generation(cache, name) - - # both entries should be in the old generation - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - assert get_from_old(cache, name, 2) == 2 - - # fetch entry 1 and put it into the new generation - assert cache.get(1) == 1 - assert get_from_new(cache, name, 1) == 1 - refute get_from_new(cache, name, 2) - refute get_from_old(cache, name, 1) - assert get_from_old(cache, name, 2) == 2 - - # create a new generation, the old generation should be deleted - _ = new_generation(cache, name) - - # entry 1 should be into the old generation and entry 2 deleted - refute get_from_new(cache, name, 1) - refute get_from_new(cache, name, 2) - assert get_from_old(cache, name, 1) == 1 - refute get_from_old(cache, name, 2) - end - - test "creation with ttl", %{cache: cache, name: name} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.get(1) == 1 - - _ = new_generation(cache, name) - - refute get_from_new(cache, name, 1) - assert get_from_old(cache, name, 1) == 1 - assert cache.get(1) == 1 - - :ok = Process.sleep(1100) - 
- refute cache.get(1) - refute get_from_new(cache, name, 1) - refute get_from_old(cache, name, 1) - end - end - - ## Helpers - - defp new_generation(cache, name) do - cache.with_dynamic_cache(name, fn -> - cache.new_generation() - end) - end - - defp get_from_new(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - get_from(cache.newer_generation(), name, key) - end) - end - - defp get_from_old(cache, name, key) do - cache.with_dynamic_cache(name, fn -> - cache.generations() - |> List.last() - |> get_from(name, key) - end) - end - - defp get_from(gen, name, key) do - Adapter.with_meta(name, fn _, %{backend: backend} -> - case backend.lookup(gen, key) do - [] -> nil - [{_, ^key, val, _, _}] -> val - end - end) - end - - defp all_or_stream(cache, action, ms, opts \\ []) - - defp all_or_stream(cache, :all, ms, opts) do - ms - |> cache.all(opts) - |> handle_query_result() - end - - defp all_or_stream(cache, :stream, ms, opts) do - ms - |> cache.stream(opts) - |> handle_query_result() - end - - defp handle_query_result(list) when is_list(list) do - :lists.usort(list) - end - - defp handle_query_result(stream) do - stream - |> Enum.to_list() - |> :lists.usort() - end - end -end diff --git a/test/shared/multilevel_test.exs b/test/shared/multilevel_test.exs deleted file mode 100644 index 8b6a6cb3..00000000 --- a/test/shared/multilevel_test.exs +++ /dev/null @@ -1,284 +0,0 @@ -defmodule Nebulex.MultilevelTest do - import Nebulex.CacheCase - - deftests do - describe "c:init/1" do - test "fails because missing levels config", %{cache: cache} do - assert {:error, {%ArgumentError{message: msg}, _}} = cache.start_link(name: :missing_levels) - - assert Regex.match?( - ~r"expected levels: to be a list with at least one level definition", - msg - ) - end - end - - describe "entry:" do - test "put/3", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.get(1, level: 1) == 1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert 
cache.put(2, 2, level: 2) == :ok - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.put("foo", nil) == :ok - refute cache.get("foo") - end - - test "put_new/3", %{cache: cache} do - assert cache.put_new(1, 1) - refute cache.put_new(1, 2) - assert cache.get(1, level: 1) == 1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert cache.put_new(2, 2, level: 2) - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.put_new("foo", nil) - refute cache.get("foo") - end - - test "put_all/2", %{cache: cache} do - assert cache.put_all( - for x <- 1..3 do - {x, x} - end, - ttl: 1000 - ) == :ok - - for x <- 1..3, do: assert(cache.get(x) == x) - :ok = Process.sleep(1100) - for x <- 1..3, do: refute(cache.get(x)) - - assert cache.put_all(%{"apples" => 1, "bananas" => 3}) == :ok - assert cache.put_all(blueberries: 2, strawberries: 5) == :ok - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - assert cache.get(:blueberries) == 2 - assert cache.get(:strawberries) == 5 - - assert cache.put_all([]) == :ok - assert cache.put_all(%{}) == :ok - - refute cache.put_new_all(%{"apples" => 100}) - assert cache.get("apples") == 1 - end - - test "get_all/2", %{cache: cache} do - assert cache.put_all(a: 1, c: 3) == :ok - assert cache.get_all([:a, :b, :c]) == %{a: 1, c: 3} - end - - test "delete/2", %{cache: cache} do - assert cache.put(1, 1) - assert cache.put(2, 2, level: 2) - - assert cache.delete(1) == :ok - refute cache.get(1, level: 1) - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - - assert cache.delete(2, level: 2) == :ok - refute cache.get(2, level: 1) - refute cache.get(2, level: 2) - refute cache.get(2, level: 3) - end - - test "take/2", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert 
cache.take(1) == 1 - assert cache.take(2) == 2 - assert cache.take(3) == 3 - - refute cache.get(1, level: 1) - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - refute cache.get(2, level: 2) - refute cache.get(3, level: 3) - end - - test "has_key?/1", %{cache: cache} do - assert cache.put(1, 1) == :ok - assert cache.put(2, 2, level: 2) == :ok - assert cache.put(3, 3, level: 3) == :ok - - assert cache.has_key?(1) - assert cache.has_key?(2) - assert cache.has_key?(3) - refute cache.has_key?(4) - end - - test "ttl/1", %{cache: cache} do - assert cache.put(:a, 1, ttl: 1000) == :ok - assert cache.ttl(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(10) - assert cache.ttl(:a) > 0 - assert cache.ttl(:b) == :infinity - refute cache.ttl(:c) - - :ok = Process.sleep(1100) - refute cache.ttl(:a) - end - - test "expire/2", %{cache: cache} do - assert cache.put(:a, 1) == :ok - assert cache.ttl(:a) == :infinity - - assert cache.expire(:a, 1000) - ttl = cache.ttl(:a) - assert ttl > 0 and ttl <= 1000 - - assert cache.get(:a, level: 1) == 1 - assert cache.get(:a, level: 2) == 1 - assert cache.get(:a, level: 3) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:a) - refute cache.get(:a, level: 1) - refute cache.get(:a, level: 2) - refute cache.get(:a, level: 3) - end - - test "touch/1", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000, level: 2) == :ok - - :ok = Process.sleep(10) - assert cache.touch(:touch) - - :ok = Process.sleep(200) - assert cache.touch(:touch) - assert cache.get(:touch) == 1 - - :ok = Process.sleep(1100) - refute cache.get(:touch) - - refute cache.touch(:non_existent) - end - - test "get_and_update/3", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.get_and_update(1, &{&1, &1 * 2}, level: 1) == {1, 2} - assert cache.get(1, level: 1) == 2 - refute cache.get(1, level: 3) - refute cache.get(1, level: 3) - - assert cache.get_and_update(2, &{&1, &1 * 2}) == {2, 
4} - assert cache.get(2, level: 1) == 4 - assert cache.get(2, level: 2) == 4 - assert cache.get(2, level: 3) == 4 - - assert cache.get_and_update(1, fn _ -> :pop end, level: 1) == {2, nil} - refute cache.get(1, level: 1) - - assert cache.get_and_update(2, fn _ -> :pop end) == {4, nil} - refute cache.get(2, level: 1) - refute cache.get(2, level: 2) - refute cache.get(2, level: 3) - end - - test "update/4", %{cache: cache} do - assert cache.put(1, 1, level: 1) == :ok - assert cache.put(2, 2) == :ok - - assert cache.update(1, 1, &(&1 * 2), level: 1) == 2 - assert cache.get(1, level: 1) == 2 - refute cache.get(1, level: 2) - refute cache.get(1, level: 3) - - assert cache.update(2, 1, &(&1 * 2)) == 4 - assert cache.get(2, level: 1) == 4 - assert cache.get(2, level: 2) == 4 - assert cache.get(2, level: 3) == 4 - end - - test "incr/3", %{cache: cache} do - assert cache.incr(1) == 1 - assert cache.get(1, level: 1) == 1 - assert cache.get(1, level: 2) == 1 - assert cache.get(1, level: 3) == 1 - - assert cache.incr(2, 2, level: 2) == 2 - assert cache.get(2, level: 2) == 2 - refute cache.get(2, level: 1) - refute cache.get(2, level: 3) - - assert cache.incr(3, 3) == 3 - assert cache.get(3, level: 1) == 3 - assert cache.get(3, level: 2) == 3 - assert cache.get(3, level: 3) == 3 - - assert cache.incr(4, 5) == 5 - assert cache.incr(4, -5) == 0 - assert cache.get(4, level: 1) == 0 - assert cache.get(4, level: 2) == 0 - assert cache.get(4, level: 3) == 0 - end - end - - describe "queryable:" do - test "all/2 and stream/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 20..60, do: cache.put(x, x, level: 2) - for x <- 50..100, do: cache.put(x, x, level: 3) - - expected = :lists.usort(for x <- 1..100, do: x) - assert :lists.usort(cache.all()) == expected - - stream = cache.stream() - - assert stream - |> Enum.to_list() - |> :lists.usort() == expected - - del = - for x <- 20..60 do - assert cache.delete(x) == :ok - x - end - - expected = 
:lists.usort(expected -- del) - assert :lists.usort(cache.all()) == expected - end - - test "delete_all/2", %{cache: cache} do - for x <- 1..30, do: cache.put(x, x, level: 1) - for x <- 21..60, do: cache.put(x, x, level: 2) - for x <- 51..100, do: cache.put(x, x, level: 3) - - assert count = cache.count_all() - assert cache.delete_all() == count - assert cache.all() == [] - end - - test "count_all/2", %{cache: cache} do - assert cache.count_all() == 0 - for x <- 1..10, do: cache.put(x, x, level: 1) - for x <- 11..20, do: cache.put(x, x, level: 2) - for x <- 21..30, do: cache.put(x, x, level: 3) - assert cache.count_all() == 30 - - for x <- [1, 11, 21], do: cache.delete(x, level: 1) - assert cache.count_all() == 29 - - assert cache.delete(1, level: 1) == :ok - assert cache.delete(11, level: 2) == :ok - assert cache.delete(21, level: 3) == :ok - assert cache.count_all() == 27 - end - end - end -end diff --git a/test/support/cache_case.ex b/test/support/cache_case.exs similarity index 89% rename from test/support/cache_case.ex rename to test/support/cache_case.exs index 3455069d..59e7796e 100644 --- a/test/support/cache_case.ex +++ b/test/support/cache_case.exs @@ -1,6 +1,8 @@ defmodule Nebulex.CacheCase do @moduledoc false + use ExUnit.CaseTemplate + alias Nebulex.Telemetry @doc false @@ -44,6 +46,7 @@ defmodule Nebulex.CacheCase do on_exit(fn -> try do :ok = Process.sleep(20) + if Process.alive?(pid), do: Supervisor.stop(pid, :normal, 5000) catch :exit, _ -> :noop @@ -65,11 +68,13 @@ defmodule Nebulex.CacheCase do default_dynamic_cache = cache.get_dynamic_cache() {:ok, pid} = cache.start_link([name: name] ++ opts) + _ = cache.put_dynamic_cache(name) on_exit(fn -> try do :ok = Process.sleep(20) + if Process.alive?(pid), do: Supervisor.stop(pid, :normal, 5000) catch :exit, _ -> :noop @@ -86,13 +91,16 @@ defmodule Nebulex.CacheCase do @doc false def test_with_dynamic_cache(cache, opts \\ [], callback) do default_dynamic_cache = cache.get_dynamic_cache() + {:ok, pid} = 
cache.start_link(opts) try do _ = cache.put_dynamic_cache(pid) + callback.() after _ = cache.put_dynamic_cache(default_dynamic_cache) + Supervisor.stop(pid) end end @@ -107,6 +115,7 @@ defmodule Nebulex.CacheCase do rescue _ -> :ok = Process.sleep(delay) + wait_until(retries - 1, delay, fun) end @@ -114,7 +123,9 @@ defmodule Nebulex.CacheCase do def cache_put(cache, lst, fun \\ & &1, opts \\ []) do for key <- lst do value = fun.(key) + :ok = cache.put(key, value, opts) + value end end @@ -138,4 +149,18 @@ defmodule Nebulex.CacheCase do def handle_event(event, measurements, metadata, %{pid: pid}) do send(pid, {event, measurements, metadata}) end + + @doc false + def assert_error_module(ctx, error_module) do + fun = Map.get(ctx, :error_module, fn m -> assert m == Nebulex.Error end) + + fun.(error_module) + end + + @doc false + def assert_error_reason(ctx, error_reason) do + fun = Map.get(ctx, :error_reason, fn r -> assert r == :error end) + + fun.(error_reason) + end end diff --git a/test/support/cluster.ex b/test/support/cluster.ex deleted file mode 100644 index a8125f96..00000000 --- a/test/support/cluster.ex +++ /dev/null @@ -1,88 +0,0 @@ -defmodule Nebulex.Cluster do - @moduledoc """ - Taken from `Phoenix.PubSub.Cluster`. 
- Copyright (c) 2014 Chris McCord - """ - - def spawn(nodes) do - # Turn node into a distributed node with the given long name - _ = :net_kernel.start([:"primary@127.0.0.1"]) - - # Allow spawned nodes to fetch all code from this node - _ = :erl_boot_server.start([]) - _ = allow_boot(to_charlist("127.0.0.1")) - - nodes - |> Enum.map(&Task.async(fn -> spawn_node(&1) end)) - |> Enum.map(&Task.await(&1, 30_000)) - end - - def spawn_node(node_host) do - {:ok, node} = start_peer(node_host) - - _ = add_code_paths(node) - _ = transfer_configuration(node) - _ = ensure_applications_started(node) - - {:ok, node} - end - - if Code.ensure_loaded?(:peer) do - defp start_peer(node_host) do - {:ok, _pid, node} = - :peer.start(%{ - name: node_name(node_host), - host: to_charlist("127.0.0.1"), - args: [inet_loader_args()] - }) - - {:ok, node} - end - else - defp start_peer(node_host) do - :slave.start(to_charlist("127.0.0.1"), node_name(node_host), inet_loader_args()) - end - end - - defp rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end - - defp inet_loader_args do - to_charlist("-loader inet -hosts 127.0.0.1 -setcookie #{:erlang.get_cookie()}") - end - - defp allow_boot(host) do - {:ok, ipv4} = :inet.parse_ipv4_address(host) - :erl_boot_server.add_slave(ipv4) - end - - defp add_code_paths(node) do - rpc(node, :code, :add_paths, [:code.get_path()]) - end - - defp transfer_configuration(node) do - for {app_name, _, _} <- Application.loaded_applications() do - for {key, val} <- Application.get_all_env(app_name) do - rpc(node, Application, :put_env, [app_name, key, val]) - end - end - end - - defp ensure_applications_started(node) do - rpc(node, Application, :ensure_all_started, [:mix]) - rpc(node, Mix, :env, [Mix.env()]) - - for {app_name, _, _} <- Application.loaded_applications(), app_name not in [:dialyxir] do - rpc(node, Application, :ensure_all_started, [app_name]) - end - end - - defp node_name(node_host) do - node_host - |> to_string() 
- |> String.split("@") - |> Enum.at(0) - |> String.to_atom() - end -end diff --git a/test/support/fake_adapter.exs b/test/support/fake_adapter.exs new file mode 100644 index 00000000..8315deac --- /dev/null +++ b/test/support/fake_adapter.exs @@ -0,0 +1,77 @@ +defmodule Nebulex.FakeAdapter do + @moduledoc false + + ## Nebulex.Adapter + + @doc false + defmacro __before_compile__(_), do: :ok + + @doc false + def init(_opts) do + child_spec = Supervisor.child_spec({Agent, fn -> :ok end}, id: Agent) + + {:ok, child_spec, %{}} + end + + ## Nebulex.Adapter.KV + + @doc false + def fetch(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def put(_, :error, reason, _, _, _), do: {:error, reason} + def put(_, _, _, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def delete(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def take(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def has_key?(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def ttl(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def expire(_, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def touch(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def update_counter(_, _, _, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def put_all(_, _, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Queryable + + @doc false + def execute(_, _, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def stream(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Persistence + + @doc false + def dump(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def load(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## Nebulex.Adapter.Info + + @doc false + def info(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + ## 
Nebulex.Adapter.Transaction + + @doc false + def transaction(_, _, _), do: {:error, %Nebulex.Error{reason: :error}} + + @doc false + def in_transaction?(_, _), do: {:error, %Nebulex.Error{reason: :error}} +end diff --git a/test/support/node_case.ex b/test/support/node_case.ex deleted file mode 100644 index 179cc4a5..00000000 --- a/test/support/node_case.ex +++ /dev/null @@ -1,43 +0,0 @@ -defmodule Nebulex.NodeCase do - @moduledoc """ - Based on `Phoenix.PubSub.NodeCase`. - Copyright (c) 2014 Chris McCord - """ - - @timeout 5000 - - defmacro __using__(_opts) do - quote do - use ExUnit.Case, async: true - import unquote(__MODULE__) - @moduletag :clustered - - @timeout unquote(@timeout) - end - end - - def start_caches(nodes, caches) do - for node <- nodes, {cache, opts} <- caches do - {:ok, pid} = start_cache(node, cache, opts) - {node, cache, pid} - end - end - - def start_cache(node, cache, opts \\ []) do - rpc(node, cache, :start_link, [opts]) - end - - def stop_caches(node_pid_list) do - Enum.each(node_pid_list, fn {node, _cache, pid} -> - stop_cache(node, pid) - end) - end - - def stop_cache(node, pid) do - rpc(node, Supervisor, :stop, [pid, :normal, @timeout]) - end - - def rpc(node, module, function, args) do - :rpc.block_call(node, module, function, args) - end -end diff --git a/test/support/test_adapter.exs b/test/support/test_adapter.exs new file mode 100644 index 00000000..337fb6d0 --- /dev/null +++ b/test/support/test_adapter.exs @@ -0,0 +1,557 @@ +defmodule Nebulex.TestAdapter do + @moduledoc """ + Adapter for testing purposes. 
+ """ + + defmodule Entry do + @moduledoc false + + defstruct value: nil, touched: nil, exp: nil + + alias Nebulex.Time + + @doc false + def new(value, ttl \\ :infinity, touched \\ Time.now()) do + %__MODULE__{ + value: value, + touched: touched, + exp: exp(ttl) + } + end + + @doc false + def exp(now \\ Time.now(), ttl) + + def exp(_now, :infinity), do: :infinity + def exp(now, ttl), do: now + ttl + end + + # Provide Cache Implementation + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + @behaviour Nebulex.Adapter.Persistence + + # Inherit default transaction implementation + use Nebulex.Adapter.Transaction + + # Inherit default info implementation + use Nebulex.Adapters.Common.Info + + import Nebulex.Utils + + alias Nebulex.Adapters.Common.Info.Stats + alias __MODULE__.{Entry, KV} + alias Nebulex.Time + + ## Nebulex.Adapter + + @impl true + defmacro __before_compile__(_env), do: :ok + + @impl true + def init(opts) do + # Required options + telemetry = Keyword.fetch!(opts, :telemetry) + telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix) + + # Init stats_counter + stats_counter = + if Keyword.get(opts, :stats, true) == true do + Stats.init(opts) + end + + # Adapter meta + metadata = %{ + telemetry: telemetry, + telemetry_prefix: telemetry_prefix, + stats_counter: stats_counter, + started_at: DateTime.utc_now() + } + + # KV server + child_spec = Supervisor.child_spec({KV, [adapter_meta: metadata] ++ opts}, id: KV) + + {:ok, child_spec, metadata} + end + + ## Nebulex.Adapter.KV + + @impl true + def fetch(adapter_meta, key, _opts) do + with {:ok, %Entry{value: value}} <- do_fetch(adapter_meta, key) do + {:ok, value} + end + end + + defp do_fetch(_adapter_meta, {:eval, fun}) do + fun.() + end + + defp do_fetch(adapter_meta, key) do + adapter_meta.pid + |> GenServer.call({:fetch, key}) + |> validate_ttl(key, adapter_meta) + end + + defp validate_ttl({:ok, %Entry{exp: :infinity} = entry}, _key, _adapter_meta) do + 
{:ok, entry} + end + + defp validate_ttl( + {:ok, %Entry{exp: exp} = entry}, + key, + %{ + name: name, + cache: cache, + pid: pid + } = adapter_meta + ) + when is_integer(exp) do + if Time.now() >= exp do + :ok = delete(adapter_meta, key, []) + + wrap_error Nebulex.KeyError, key: key, cache: name || {cache, pid}, reason: :expired + else + {:ok, entry} + end + end + + defp validate_ttl(:error, key, %{name: name, cache: cache, pid: pid}) do + wrap_error Nebulex.KeyError, key: key, cache: name || {cache, pid}, reason: :not_found + end + + @impl true + def put(adapter_meta, key, value, ttl, on_write, _opts) do + do_put(adapter_meta.pid, key, Entry.new(value, ttl), on_write) + end + + defp do_put(pid, key, entry, :put) do + GenServer.call(pid, {:put, key, entry}) + end + + defp do_put(pid, key, entry, :put_new) do + GenServer.call(pid, {:put_new, key, entry}) + end + + defp do_put(pid, key, entry, :replace) do + GenServer.call(pid, {:replace, key, entry}) + end + + @impl true + def put_all(adapter_meta, entries, ttl, on_write, _opts) do + entries = for {key, value} <- entries, into: %{}, do: {key, Entry.new(value, ttl)} + + do_put_all(adapter_meta.pid, entries, on_write) + end + + defp do_put_all(pid, entries, :put) do + GenServer.call(pid, {:put_all, entries}) + end + + defp do_put_all(pid, entries, :put_new) do + GenServer.call(pid, {:put_new_all, entries}) + end + + @impl true + def delete(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:delete, key}) + end + + @impl true + def take(adapter_meta, key, _opts) do + with {:ok, %Entry{value: value}} <- + adapter_meta.pid + |> GenServer.call({:pop, key}) + |> validate_ttl(key, adapter_meta) do + {:ok, value} + end + end + + @impl true + def update_counter(adapter_meta, key, amount, ttl, default, _opts) do + _ = do_fetch(adapter_meta, key) + + GenServer.call( + adapter_meta.pid, + {:update_counter, key, amount, Entry.new(default + amount, ttl)} + ) + end + + @impl true + def has_key?(adapter_meta, key, 
_opts) do + case fetch(%{adapter_meta | telemetry: false}, key, []) do + {:ok, _} -> {:ok, true} + {:error, _} -> {:ok, false} + end + end + + @impl true + def ttl(adapter_meta, key, _opts) do + with {:ok, entry} <- do_fetch(adapter_meta, key) do + {:ok, entry_ttl(entry)} + end + end + + @impl true + def expire(adapter_meta, key, ttl, _opts) do + GenServer.call(adapter_meta.pid, {:expire, key, ttl}) + end + + @impl true + def touch(adapter_meta, key, _opts) do + GenServer.call(adapter_meta.pid, {:touch, key}) + end + + ## Nebulex.Adapter.Queryable + + @impl true + def execute(adapter_meta, operation, query, opts) + + def execute(_adapter_meta, :get_all, {:in, []}, _opts) do + {:ok, []} + end + + def execute(_adapter_meta, :count_all, {:in, []}, _opts) do + {:ok, 0} + end + + def execute(_adapter_meta, :delete_all, {:in, []}, _opts) do + {:ok, 0} + end + + def execute(adapter_meta, operation, query, opts) do + with {:ok, query} <- assert_query(query) do + GenServer.call(adapter_meta.pid, {:q, operation, query, opts}) + end + end + + @impl true + def stream(adapter_meta, query, opts) do + with {:ok, query} <- assert_query(query) do + GenServer.call(adapter_meta.pid, {:q, :stream, query, opts}) + end + end + + defp assert_query(query) + when is_nil(query) or query == :unexpired or (is_tuple(query) and elem(query, 0) == :in) do + {:ok, query} + end + + defp assert_query(query) do + wrap_error Nebulex.Error, reason: :invalid_query, query: query, cache: nil + end + + ## Nebulex.Adapter.Persistence + + @impl true + def dump(%{cache: cache}, path, opts) do + with_file(cache, path, [:write], fn io_dev -> + with {:ok, stream} <- cache.stream(:unexpired, return: :entries) do + stream + |> Stream.chunk_every(Keyword.get(opts, :entries_per_line, 10)) + |> Enum.each(fn chunk -> + bin = + chunk + |> :erlang.term_to_binary(get_compression(opts)) + |> Base.encode64() + + :ok = IO.puts(io_dev, bin) + end) + end + end) + end + + @impl true + def load(%{cache: cache}, path, opts) do + 
with_file(cache, path, [:read], fn io_dev -> + io_dev + |> IO.stream(:line) + |> Stream.map(&String.trim/1) + |> Enum.each(fn line -> + entries = + line + |> Base.decode64!() + |> :erlang.binary_to_term([:safe]) + + cache.put_all(entries, opts) + end) + end) + end + + # sobelow_skip ["Traversal.FileModule"] + defp with_file(cache, path, modes, function) do + case File.open(path, modes) do + {:ok, io_device} -> + try do + function.(io_device) + after + :ok = File.close(io_device) + end + + {:error, reason} -> + reason = %File.Error{reason: reason, action: "open", path: path} + + wrap_error Nebulex.Error, reason: reason, cache: cache + end + end + + defp get_compression(opts) do + case Keyword.get(opts, :compression) do + value when is_integer(value) and value >= 0 and value < 10 -> + [compressed: value] + + _ -> + [:compressed] + end + end + + ## Nebulex.Adapter.Info + + @impl true + def info(adapter_meta, spec, opts) do + cond do + spec == :all -> + with {:ok, info} <- GenServer.call(adapter_meta.pid, {:info, [:memory]}), + {:ok, base_info} <- super(adapter_meta, :all, opts) do + {:ok, Map.merge(base_info, info)} + end + + spec == :memory -> + GenServer.call(adapter_meta.pid, {:info, spec}) + + is_list(spec) and Enum.member?(spec, :memory) -> + with {:ok, info} <- GenServer.call(adapter_meta.pid, {:info, [:memory]}), + spec = Enum.reject(spec, &(&1 == :memory)), + {:ok, base_info} <- super(adapter_meta, spec, opts) do + {:ok, Map.merge(base_info, info)} + end + + true -> + super(adapter_meta, spec, opts) + end + end + + ## Helpers + + defp entry_ttl(%Entry{exp: :infinity}), do: :infinity + defp entry_ttl(%Entry{exp: exp}), do: exp - Time.now() +end + +defmodule Nebulex.TestAdapter.KV do + @moduledoc false + + use GenServer + + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.TestAdapter.Entry + alias Nebulex.Time + + ## Internals + + # Internal state + defstruct map: nil, adapter_meta: nil + + ## API + + @spec start_link(keyword) :: 
GenServer.on_start() + def start_link(opts) do + GenServer.start_link(__MODULE__, opts) + end + + ## GenServer callbacks + + @impl true + def init(opts) do + {:ok, %__MODULE__{map: %{}, adapter_meta: Keyword.fetch!(opts, :adapter_meta)}} + end + + @impl true + def handle_call(request, from, state) + + def handle_call({:fetch, key}, _from, %__MODULE__{map: map} = state) do + {:reply, Map.fetch(map, key), state} + end + + def handle_call({:put, key, value}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.put(map, key, value)}} + end + + def handle_call({:put_new, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.put_new(map, key, value)}} + end + end + + def handle_call({:replace, key, value}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.replace(map, key, value)}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:put_all, entries}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + + def handle_call({:put_new_all, entries}, _from, %__MODULE__{map: map} = state) do + case Enum.any?(map, fn {k, _} -> Map.has_key?(entries, k) end) do + true -> + {:reply, {:ok, false}, state} + + false -> + {:reply, {:ok, true}, %{state | map: Map.merge(map, entries)}} + end + end + + def handle_call({:delete, key}, _from, %__MODULE__{map: map} = state) do + {:reply, :ok, %{state | map: Map.delete(map, key)}} + end + + def handle_call({:pop, key}, _from, %__MODULE__{map: map} = state) do + ref = make_ref() + + case Map.pop(map, key, ref) do + {^ref, _map} -> + {:reply, :error, state} + + {value, map} -> + {:reply, {:ok, value}, %{state | map: map}} + end + end + + def handle_call({:update_counter, key, amount, default}, _from, %__MODULE__{map: map} 
= state) do + case Map.fetch(map, key) do + {:ok, %{value: value}} when not is_integer(value) -> + error = wrap_error Nebulex.Error, reason: :badarith, cache: nil + + {:reply, error, map} + + _other -> + map = Map.update(map, key, default, &%{&1 | value: &1.value + amount}) + counter = Map.fetch!(map, key) + + {:reply, {:ok, counter.value}, %{state | map: map}} + end + end + + def handle_call({:expire, key, ttl}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | exp: Entry.exp(ttl)})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:touch, key}, _from, %__MODULE__{map: map} = state) do + case Map.has_key?(map, key) do + true -> + {:reply, {:ok, true}, %{state | map: Map.update!(map, key, &%{&1 | touched: Time.now()})}} + + false -> + {:reply, {:ok, false}, state} + end + end + + def handle_call({:q, :get_all, nil, opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, return(Enum, map, opts)}, state} + end + + def handle_call({:q, :get_all, :unexpired, opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, return(Enum, filter_unexpired(map), opts)}, state} + end + + def handle_call({:q, :get_all, {:in, keys}, opts}, _from, %__MODULE__{map: map} = state) do + map = map |> Map.take(keys) |> filter_unexpired() + + {:reply, {:ok, return(Enum, map, opts)}, state} + end + + def handle_call({:q, :count_all, nil, _opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, map_size(map)}, state} + end + + def handle_call({:q, :count_all, {:in, keys}, _opts}, _from, %__MODULE__{map: map} = state) do + count = map |> Map.take(keys) |> map_size() + + {:reply, {:ok, count}, state} + end + + def handle_call({:q, :delete_all, nil, _opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, map_size(map)}, %{state | map: %{}}} + end + + def handle_call({:q, :delete_all, {:in, keys}, _opts}, _from, %__MODULE__{map: 
map} = state) do + total_count = map_size(map) + map = Map.drop(map, keys) + + {:reply, {:ok, total_count - map_size(map)}, %{state | map: map}} + end + + def handle_call({:q, :stream, nil, opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, return(Stream, map, opts)}, state} + end + + def handle_call({:q, :stream, :unexpired, opts}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, return(Stream, filter_unexpired(map), opts)}, state} + end + + def handle_call({:info, :memory}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, memory(map)}, state} + end + + def handle_call({:info, [_ | _]}, _from, %__MODULE__{map: map} = state) do + {:reply, {:ok, %{memory: memory(map)}}, state} + end + + ## Private Functions + + defp filter_unexpired(enum) do + now = Time.now() + + for {k, %Entry{exp: exp} = e} <- enum, exp > now, into: %{} do + {k, e} + end + end + + defp return(module, map, opts) do + case Keyword.get(opts, :return, :entries) do + :keys -> + module.map(map, fn {k, _e} -> k end) + + :values -> + module.map(map, fn {_k, e} -> e.value end) + + :entries -> + module.map(map, fn {k, e} -> {k, e.value} end) + end + end + + defp memory(map) when map_size(map) == 0 do + %{ + # Fixed + allocated_memory: 1_000_000, + # Empty + used_memory: 0 + } + end + + defp memory(map) do + %{ + # Fixed + allocated_memory: 1_000_000, + # Fake size + used_memory: map |> :erlang.term_to_binary() |> byte_size() + } + end +end diff --git a/test/support/test_cache.ex b/test/support/test_cache.ex deleted file mode 100644 index d0761c80..00000000 --- a/test/support/test_cache.ex +++ /dev/null @@ -1,216 +0,0 @@ -defmodule Nebulex.TestCache do - @moduledoc false - - defmodule Common do - @moduledoc false - - defmacro __using__(_opts) do - quote do - def get_and_update_fun(nil), do: {nil, 1} - def get_and_update_fun(current) when is_integer(current), do: {current, current * 2} - - def get_and_update_bad_fun(_), do: :other - end - end - end - - defmodule TestHook do - 
@moduledoc false - use GenServer - - alias Nebulex.Hook - - @actions [:get, :put] - - def start_link(opts \\ []) do - GenServer.start_link(__MODULE__, opts, name: __MODULE__) - end - - ## Hook Function - - def track(%Hook{step: :before, name: name}) when name in @actions do - System.system_time(:microsecond) - end - - def track(%Hook{step: :after_return, name: name} = event) when name in @actions do - GenServer.cast(__MODULE__, {:track, event}) - end - - def track(hook), do: hook - - ## Error Hook Function - - def hook_error(%Hook{name: :get}), do: raise(ArgumentError, "error") - - def hook_error(hook), do: hook - - ## GenServer - - @impl GenServer - def init(_opts) do - {:ok, %{}} - end - - @impl GenServer - def handle_cast({:track, %Hook{acc: start} = hook}, state) do - _ = send(:hooked_cache, %{hook | acc: System.system_time(:microsecond) - start}) - {:noreply, state} - end - end - - defmodule Cache do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - - use Nebulex.TestCache.Common - end - - defmodule Partitioned do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - - use Nebulex.TestCache.Common - end - - defmodule Replicated do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - - use Nebulex.TestCache.Common - end - - defmodule Multilevel do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Multilevel - - defmodule L1 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Local - end - - defmodule L2 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated - end - - defmodule L3 do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned - end - end - - ## Mocks - - defmodule AdapterMock do - @moduledoc false - @behaviour Nebulex.Adapter - @behaviour 
Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - @impl true - defmacro __before_compile__(_), do: :ok - - @impl true - def init(opts) do - child = { - {Agent, System.unique_integer([:positive, :monotonic])}, - {Agent, :start_link, [fn -> :ok end, [name: opts[:child_name]]]}, - :permanent, - 5_000, - :worker, - [Agent] - } - - {:ok, child, %{}} - end - - @impl true - def get(_, key, _) do - if is_integer(key) do - raise ArgumentError, "Error" - else - :ok - end - end - - @impl true - def put(_, _, _, _, _, _) do - :ok = Process.sleep(1000) - true - end - - @impl true - def delete(_, _, _), do: :ok - - @impl true - def take(_, _, _), do: nil - - @impl true - def has_key?(_, _), do: true - - @impl true - def ttl(_, _), do: nil - - @impl true - def expire(_, _, _), do: true - - @impl true - def touch(_, _), do: true - - @impl true - def update_counter(_, _, _, _, _, _), do: 1 - - @impl true - def get_all(_, _, _) do - :ok = Process.sleep(1000) - %{} - end - - @impl true - def put_all(_, _, _, _, _), do: Process.exit(self(), :normal) - - @impl true - def execute(_, :count_all, _, _) do - _ = Process.exit(self(), :normal) - 0 - end - - def execute(_, :delete_all, _, _) do - Process.sleep(2000) - 0 - end - - @impl true - def stream(_, _, _), do: 1..10 - end - - defmodule PartitionedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end - - defmodule ReplicatedMock do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex, - adapter: Nebulex.Adapters.Replicated, - primary_storage_adapter: Nebulex.TestCache.AdapterMock - end -end diff --git a/test/support/test_cache.exs b/test/support/test_cache.exs new file mode 100644 index 00000000..3b3e2458 --- /dev/null +++ b/test/support/test_cache.exs @@ -0,0 +1,115 @@ +defmodule Nebulex.TestCache do + @moduledoc false + + defmodule Common do + @moduledoc false + + defmacro __using__(_opts) do + 
quote do + def get_and_update_fun(nil), do: {nil, 1} + def get_and_update_fun(current) when is_integer(current), do: {current, current * 2} + + def get_and_update_bad_fun(_), do: :other + end + end + end + + defmodule Cache do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.TestAdapter + + use Nebulex.TestCache.Common + end + + ## Mocks + + defmodule AdapterMock do + @moduledoc false + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + + @impl true + defmacro __before_compile__(_), do: :ok + + @impl true + def init(opts) do + child = { + {Agent, System.unique_integer([:positive, :monotonic])}, + {Agent, :start_link, [fn -> :ok end, [name: opts[:child_name]]]}, + :permanent, + 5_000, + :worker, + [Agent] + } + + {:ok, child, %{}} + end + + @impl true + def fetch(_, key, _) do + if is_integer(key) do + raise ArgumentError, "Error" + else + {:ok, :ok} + end + end + + @impl true + def put(_, _, _, _, _, _) do + :ok = Process.sleep(1000) + + {:ok, true} + end + + @impl true + def delete(_, _, _), do: :ok + + @impl true + def take(_, _, _), do: {:ok, nil} + + @impl true + def has_key?(_, _, _), do: {:ok, true} + + @impl true + def ttl(_, _, _), do: {:ok, nil} + + @impl true + def expire(_, _, _, _), do: {:ok, true} + + @impl true + def touch(_, _, _), do: {:ok, true} + + @impl true + def update_counter(_, _, _, _, _, _), do: {:ok, 1} + + @impl true + def put_all(_, _, _, _, _) do + {:ok, Process.exit(self(), :normal)} + end + + @impl true + def execute(_, :get_all, _, _) do + :ok = Process.sleep(1000) + + {:ok, []} + end + + def execute(_, :count_all, _, _) do + _ = Process.exit(self(), :normal) + + {:ok, 0} + end + + def execute(_, :delete_all, _, _) do + :ok = Process.sleep(2000) + + {:ok, 0} + end + + @impl true + def stream(_, _, _), do: {:ok, 1..10} + end +end diff --git a/test/test_helper.exs b/test/test_helper.exs index 0b1736e8..2adc0d1f 100644 --- a/test/test_helper.exs +++ 
b/test/test_helper.exs @@ -1,25 +1,31 @@ -# Start Telemetry -_ = Application.start(:telemetry) +# Load support modules +Code.require_file("support/test_adapter.exs", __DIR__) +Code.require_file("support/fake_adapter.exs", __DIR__) +Code.require_file("support/test_cache.exs", __DIR__) +Code.require_file("support/cache_case.exs", __DIR__) -# Set nodes -nodes = [:"node1@127.0.0.1", :"node2@127.0.0.1", :"node3@127.0.0.1", :"node4@127.0.0.1"] -:ok = Application.put_env(:nebulex, :nodes, nodes) - -# Load shared tests +# Load shared test cases for file <- File.ls!("test/shared/cache") do Code.require_file("./shared/cache/" <> file, __DIR__) end +# Load shared test cases for file <- File.ls!("test/shared"), not File.dir?("test/shared/" <> file) do Code.require_file("./shared/" <> file, __DIR__) end -# Spawn remote nodes -unless :clustered in Keyword.get(ExUnit.configuration(), :exclude, []) do - Nebulex.Cluster.spawn(nodes) -end +# Mocks +[ + Mix.Project, + Nebulex.Cache.Registry +] +|> Enum.each(&Mimic.copy/1) + +# Start Telemetry +_ = Application.start(:telemetry) -# For mix tests +# For tasks/generators testing +Mix.start() Mix.shell(Mix.Shell.Process) # Start ExUnit