Add sys_heap heap allocator #23941

Merged · 13 commits · Apr 14, 2020
135 changes: 68 additions & 67 deletions include/kernel.h
@@ -76,26 +76,6 @@ extern "C" {
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif

#ifdef CONFIG_OBJECT_TRACING
#define _OBJECT_TRACING_NEXT_PTR(type) struct type *__next;
#define _OBJECT_TRACING_LINKED_FLAG u8_t __linked;
@@ -4172,30 +4152,6 @@ static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)

/** @} */

/**
* @defgroup mem_pool_apis Memory Pool APIs
* @ingroup kernel_apis
* @{
*/

/* Note on sizing: the use of a 20 bit field for block means that,
* assuming a reasonable minimum block size of 16 bytes, we're limited
* to 16M of memory managed by a single pool. Long term it would be
* good to move to a variable bit size based on configuration.
*/
struct k_mem_block_id {
	u32_t pool : 8;
	u32_t level : 4;
	u32_t block : 20;
};

struct k_mem_block {
	void *data;
	struct k_mem_block_id id;
};

/** @} */

/**
* @defgroup mailbox_apis Mailbox APIs
* @ingroup kernel_apis
@@ -4756,22 +4712,72 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
/** @} */

/**
* @cond INTERNAL_HIDDEN
* @addtogroup mem_pool_apis
* @{
*/

struct k_mem_pool {
	struct sys_mem_pool_base base;
	_wait_q_t wait_q;
};
/**
* @brief Initialize a k_heap
*
* This constructs a synchronized k_heap object over a memory region
* specified by the user. Note that while any alignment and size can
* be passed as valid parameters, internal alignment restrictions
* inside the inner sys_heap mean that not all bytes may be usable as
* allocated memory.
*
* @param h Heap struct to initialize
* @param mem Pointer to memory.
* @param bytes Size of memory region, in bytes
*/
void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
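
A minimal usage sketch (buffer name and size here are illustrative,
not part of this patch):

	/* Hypothetical example: back a k_heap with a static buffer.
	 * Some of the 1024 bytes are consumed by sys_heap chunk
	 * metadata, so slightly less is available to callers.
	 */
	static char app_heap_mem[1024];
	static struct k_heap app_heap;

	void app_heap_setup(void)
	{
		k_heap_init(&app_heap, app_heap_mem, sizeof(app_heap_mem));
	}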

/**
* INTERNAL_HIDDEN @endcond
* @brief Allocate memory from a k_heap
*
* Allocates and returns a memory buffer from the memory region owned
* by the heap. If no memory is available immediately, the call will
* block for the specified timeout (constructed via the standard
* timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
* freed. If the allocation cannot be performed by the expiration of
* the timeout, NULL will be returned.
*
* @param h Heap from which to allocate
* @param bytes Desired size of block to allocate
* @param timeout How long to wait, or K_NO_WAIT
* @return A pointer to valid heap memory, or NULL
*/
void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout);
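
A sketch of the blocking behavior, continuing the example above
(names are illustrative; K_MSEC() is the standard timeout
constructor):

	/* Hypothetical example: wait up to 100 ms for a 256-byte block. */
	void *get_with_timeout(void)
	{
		void *buf = k_heap_alloc(&app_heap, 256, K_MSEC(100));

		if (buf == NULL) {
			/* Timed out: nothing was freed in time. */
		}
		return buf;
	}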

/**
* @addtogroup mem_pool_apis
* @{
* @brief Free memory allocated by k_heap_alloc()
*
* Returns the specified memory block, which must have been returned
* from k_heap_alloc(), to the heap for use by other callers. Passing
* a NULL block is legal, and has no effect.
*
* @param h Heap to which to return the memory
* @param mem A valid memory block, or NULL
*/
void k_heap_free(struct k_heap *h, void *mem);
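
A matching sketch for the allocation example above (same illustrative
app_heap):

	void put_block(void *buf)
	{
		/* Safe even if buf is NULL: freeing NULL is a no-op. */
		k_heap_free(&app_heap, buf);
	}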

/**
* @brief Define a static k_heap
*
* This macro defines and initializes a static memory region and
* k_heap of the requested size. After kernel start, &name can be
* used as if k_heap_init() had been called.
*
* @param name Symbol name for the struct k_heap object
* @param bytes Size of memory region, in bytes
*/
#define K_HEAP_DEFINE(name, bytes) \
	char __aligned(sizeof(void *)) kheap_##name[bytes]; \
	Z_STRUCT_SECTION_ITERABLE(k_heap, name) = { \
		.heap = { \
			.init_mem = kheap_##name, \
			.init_bytes = (bytes), \
		}, \
	}
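
A usage sketch (heap name and sizes are illustrative):

	/* Hypothetical example: a 2 kB heap assembled at link time;
	 * no runtime k_heap_init() call is required.
	 */
	K_HEAP_DEFINE(app_heap2, 2048);

	void *get_block(void)
	{
		return k_heap_alloc(&app_heap2, 64, K_NO_WAIT);
	}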

/**
* @brief Statically define and initialize a memory pool.
@@ -4784,6 +4790,14 @@ struct k_mem_pool {
* If the pool is to be accessed outside the module where it is defined, it
* can be declared via
*
* @note When CONFIG_MEM_POOL_HEAP_BACKEND is enabled, the k_mem_pool
* API is implemented on top of a k_heap, a more general-purpose
* allocator that does not make the promises about splitting and
* alignment detailed above. Blocks will be aligned only to the
* 8-byte chunk stride of the underlying heap and may point anywhere
* within the heap; they are not split into four sub-blocks as
* described above.
*
* @code extern struct k_mem_pool <name>; @endcode
*
* @param name Name of the memory pool.
@@ -4793,21 +4807,8 @@
* @param align Alignment of the pool's buffer (power of 2).
* @req K-MPOOL-001
*/
#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
	char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
				+ _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
	struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
	Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
		.base = { \
			.buf = _mpool_buf_##name, \
			.max_sz = WB_UP(maxsz), \
			.n_max = nmax, \
			.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
			.levels = _mpool_lvls_##name, \
			.flags = SYS_MEM_POOL_KERNEL \
		} \
	}; \
	BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK, "K_MEM_POOL_DEFINE: size of the largest block (parameter maxsz) is too small")
#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
	Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)
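
A usage sketch under the heap backend (pool name is illustrative):

	/* Hypothetical example: up to 4 blocks of 64..256 bytes.  With
	 * CONFIG_MEM_POOL_HEAP_BACKEND the align argument is not used
	 * for sizing; blocks are aligned only to the heap's 8-byte
	 * chunk stride, per the note above.
	 */
	K_MEM_POOL_DEFINE(app_pool, 64, 256, 4, 4);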

/**
* @brief Allocate memory from a memory pool.
5 changes: 5 additions & 0 deletions include/kernel_includes.h
@@ -27,6 +27,11 @@
#include <sys/util.h>
#include <sys/mempool_base.h>
#include <kernel_structs.h>
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
#include <mempool_heap.h>
#else
#include <mempool_sys.h>
#endif
#include <kernel_version.h>
#include <random/rand32.h>
#include <syscall.h>
76 changes: 76 additions & 0 deletions include/kernel_structs.h
@@ -21,10 +21,12 @@
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <sys/atomic.h>
#include <zephyr/types.h>
#include <sched_priq.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/sys_heap.h>
#endif

#define K_NUM_PRIORITIES \
@@ -207,6 +209,80 @@ bool z_smp_cpu_mobile(void);

#define _timeout_q _kernel.timeout_q

/* kernel wait queue record */

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif

/* kernel timeout record */

struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;
	s32_t dticks;
	_timeout_func_t fn;
};

/* kernel spinlock type */

struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
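
An illustration of the layout hazard described in the comment above
(hypothetical struct, not from this patch):

	/* With an empty k_spinlock, offsetof(struct msgq_like, count)
	 * would be 0 in C but nonzero in C++, so C and C++ translation
	 * units would disagree about where count lives.  The dummy
	 * byte keeps sizeof(struct k_spinlock) identical in both.
	 */
	struct msgq_like {
		struct k_spinlock lock;
		int count;
	};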

/* kernel synchronized heap struct */

struct k_heap {
	struct sys_heap heap;
	_wait_q_t wait_q;
	struct k_spinlock lock;
};

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
7 changes: 7 additions & 0 deletions include/linker/common-ram.ld
@@ -76,6 +76,13 @@
_k_mem_pool_list_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

SECTION_DATA_PROLOGUE(_k_heap_area,,SUBALIGN(4))
{
	_k_heap_list_start = .;
	KEEP(*("._k_heap.static.*"))
	_k_heap_list_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

SECTION_DATA_PROLOGUE(_k_sem_area,,SUBALIGN(4))
{
	_k_sem_list_start = .;
55 changes: 55 additions & 0 deletions include/mempool_heap.h
@@ -0,0 +1,55 @@
/*
* Copyright (c) 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_
#define ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_

/* Compatibility implementation of a k_mem_pool backend in terms of a
* k_heap
*/

/* The "ID" of a k_heap-based mempool is just the tuple of the data
* block pointer and the heap that allocated it
*/
struct k_mem_block_id {
	void *data;
	struct k_heap *heap;
};

/* Note the data pointer gets unioned with the same value stored in
* the ID field to save space.
*/
struct k_mem_block {
	union {
		void *data;
		struct k_mem_block_id id;
	};
};

struct k_mem_pool {
	struct k_heap *heap;
};

/* Sizing is a heuristic, as k_mem_pool made promises about layout
* that k_heap does not. We make space for the number of maximum
* objects defined, and include extra so there's enough metadata space
* available for the maximum number of minimum-sized objects to be
* stored: 8 bytes for each desired chunk header, and a 24 word block
* to reserve room for a "typical" set of bucket list heads (this size
* was picked more to conform with existing test expectations than any
* rigorous theory -- we have tests that rely on being able to
* allocate the blocks promised and ones that make assumptions about
* when memory will run out).
*/
#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
	K_HEAP_DEFINE(poolheap_##name, \
		      ((maxsz) * (nmax)) \
		      + 8 * ((maxsz) * (nmax) / (minsz)) \
		      + 24 * sizeof(void *)); \
	struct k_mem_pool name = { \
		.heap = &poolheap_##name \
	}
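
A worked instance of the heuristic (illustrative numbers): with
minsz = 16, maxsz = 64, nmax = 4 on a 32-bit target, the backing heap
is sized at 64*4 + 8*(64*4/16) + 24*4 = 256 + 128 + 96 = 480 bytes
for poolheap_<name>.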


#endif /* ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_ */