From 6ccfcdc1051d9d405f694882beb4f306ae518b5c Mon Sep 17 00:00:00 2001
From: Riadh Ghaddab <rghaddab@baylibre.com>
Date: Fri, 11 Oct 2024 10:06:38 +0200
Subject: [PATCH] [nrf noup] settings: zms: use dedicated lookup cache hash function

Introduce the ZMS_LOOKUP_CACHE_FOR_SETTINGS Kconfig option, which enables
a dedicated hash function for the ZMS lookup cache that takes advantage
of the ZMS ID allocation scheme used by the ZMS settings backend. As such,
this option should only be used if an application uses ZMS via the
settings layer.

Signed-off-by: Riadh Ghaddab <rghaddab@baylibre.com>
---
 subsys/fs/zms/Kconfig |  9 +++++++++
 subsys/fs/zms/zms.c   | 45 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)

diff --git a/subsys/fs/zms/Kconfig b/subsys/fs/zms/Kconfig
index bc1dae78279..c9514608da8 100644
--- a/subsys/fs/zms/Kconfig
+++ b/subsys/fs/zms/Kconfig
@@ -51,6 +51,15 @@ config ZMS_MAX_BLOCK_SIZE
 	help
 	  Changes the internal buffer size of ZMS
 
+config ZMS_LOOKUP_CACHE_FOR_SETTINGS
+	bool "ZMS storage lookup cache optimized for settings"
+	depends on ZMS_LOOKUP_CACHE
+	help
+	  Use the lookup cache hash function that results in the least number of
+	  collisions and, in turn, the best ZMS performance, provided that ZMS
+	  is used as the settings backend only. This option should NOT be enabled
+	  if ZMS is also written to directly, outside the settings layer.
+
 module = ZMS
 module-str = zms
 source "subsys/logging/Kconfig.template.log_config"
diff --git a/subsys/fs/zms/zms.c b/subsys/fs/zms/zms.c
index 4a90ad0129b..839c079f923 100644
--- a/subsys/fs/zms/zms.c
+++ b/subsys/fs/zms/zms.c
@@ -11,6 +11,10 @@
 #include <zephyr/fs/zms.h>
 #include <zephyr/sys/crc.h>
 #include "zms_priv.h"
+#ifdef CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS
+#include <zephyr/sys/util.h>
+#include <settings/settings_zms.h>
+#endif
 
 #include <zephyr/logging/log.h>
 LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL);
@@ -25,6 +29,45 @@ static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_at
 
 #ifdef CONFIG_ZMS_LOOKUP_CACHE
 
+#ifdef CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS
+
+static inline size_t zms_lookup_cache_pos(uint32_t id)
+{
+	/*
+	 * 1. The ZMS settings backend uses up to (ZMS_NAME_ID_OFFSET - 1) ZMS IDs to
+	 *    store keys and an equal number of ZMS IDs to store values.
+	 * 2. For each key-value pair, the value is stored at a ZMS ID greater by exactly
+	 *    ZMS_NAME_ID_OFFSET than the ZMS ID that holds the key.
+	 * 3. The backend tries to minimize the range of ZMS IDs used to store keys.
+	 *    That is, ZMS IDs are allocated sequentially, and freed ZMS IDs are reused
+	 *    before allocating new ones.
+	 *
+	 * Therefore, to ensure the least number of collisions in the lookup cache,
+	 * the least significant bit of the hash indicates whether the given ZMS ID
+	 * represents a key or a value, and the remaining bits of the hash are set to
+	 * the ordinal number of the key-value pair. Consequently, the hash function
+	 * provides the following mapping:
+	 *
+	 * 1st settings key   => hash 0
+	 * 1st settings value => hash 1
+	 * 2nd settings key   => hash 2
+	 * 2nd settings value => hash 3
+	 * ...
+	 */
+	BUILD_ASSERT(IS_POWER_OF_TWO(ZMS_NAMECNT_ID), "ZMS_NAMECNT_ID is not power of 2");
+	BUILD_ASSERT(IS_POWER_OF_TWO(ZMS_NAME_ID_OFFSET), "ZMS_NAME_ID_OFFSET is not power of 2");
+
+	uint32_t key_value_bit;
+	uint32_t key_value_ord;
+
+	key_value_bit = (id >> LOG2(ZMS_NAME_ID_OFFSET)) & 1;
+	key_value_ord = id & (ZMS_NAME_ID_OFFSET - 1);
+
+	return ((key_value_ord << 1) | key_value_bit) % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
+}
+
+#else /* CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS */
+
 static inline size_t zms_lookup_cache_pos(uint32_t id)
 {
 	uint32_t hash;
@@ -40,6 +83,8 @@ static inline size_t zms_lookup_cache_pos(uint32_t id)
 	return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
 }
 
+#endif /* CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS */
+
 static int zms_lookup_cache_rebuild(struct zms_fs *fs)
 {
 	int rc, previous_sector_num = ZMS_INVALID_SECTOR_NUM;
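
Usage sketch: the option is only meaningful when ZMS sits behind the settings
layer, so an application's prj.conf would look roughly like the snippet below.
CONFIG_ZMS_LOOKUP_CACHE and CONFIG_ZMS_LOOKUP_CACHE_SIZE are the existing ZMS
cache options this patch builds on; the backend selection symbol
CONFIG_SETTINGS_ZMS is an assumption here and may be named differently in a
given tree.

  CONFIG_FLASH=y
  CONFIG_ZMS=y
  CONFIG_ZMS_LOOKUP_CACHE=y
  CONFIG_ZMS_LOOKUP_CACHE_SIZE=128
  CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS=y
  CONFIG_SETTINGS=y
  CONFIG_SETTINGS_ZMS=y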
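
To see why the dedicated hash avoids collisions on this ID layout, the mapping
can be reproduced outside the tree. The standalone C sketch below assumes
ZMS_NAMECNT_ID = 0x80000000 and ZMS_NAME_ID_OFFSET = 0x40000000 (check the
values in settings_zms.h in your tree) and hard-codes
LOG2(ZMS_NAME_ID_OFFSET) = 30 rather than using Zephyr's LOG2() macro:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Assumed backend constants: key IDs carry bit 31, value IDs also bit 30. */
  #define ZMS_NAMECNT_ID     0x80000000u
  #define ZMS_NAME_ID_OFFSET 0x40000000u
  #define CACHE_SIZE         128u /* stands in for CONFIG_ZMS_LOOKUP_CACHE_SIZE */

  /* Same arithmetic as the patch, with LOG2(ZMS_NAME_ID_OFFSET) pre-computed. */
  static size_t cache_pos(uint32_t id)
  {
  	uint32_t key_value_bit = (id >> 30) & 1;                /* key (0) or value (1) */
  	uint32_t key_value_ord = id & (ZMS_NAME_ID_OFFSET - 1); /* pair ordinal */

  	return ((key_value_ord << 1) | key_value_bit) % CACHE_SIZE;
  }

  int main(void)
  {
  	/* Keys sit at ZMS_NAMECNT_ID + n; their values ZMS_NAME_ID_OFFSET higher. */
  	for (uint32_t n = 1; n <= 3; n++) {
  		uint32_t key_id = ZMS_NAMECNT_ID + n;
  		uint32_t value_id = key_id + ZMS_NAME_ID_OFFSET;

  		printf("pair %" PRIu32 ": key 0x%08" PRIX32 " -> %zu, value 0x%08" PRIX32 " -> %zu\n",
  		       n, key_id, cache_pos(key_id), value_id, cache_pos(value_id));
  	}
  	return 0;
  }

With sequentially allocated pairs, each key lands on an even cache slot and its
value on the adjacent odd one, so no two live IDs share a slot until more than
CONFIG_ZMS_LOOKUP_CACHE_SIZE of them exist; a general-purpose integer hash over
the same dense, structured ID range would start colliding much earlier.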