Skip to content

Commit 9a27f79

Browse files
committed
ace: zephyr: alloc: Use virtual memory heap for buffers
The buffer allocation method for the ACE platform has been changed: buffers are now allocated from the virtual memory heap, which consists of a set of buffers with predefined sizes. Signed-off-by: Adrian Warecki <adrian.warecki@intel.com>
1 parent 1b45a41 commit 9a27f79

File tree

3 files changed

+114
-2
lines changed

3 files changed

+114
-2
lines changed

zephyr/Kconfig

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,4 +77,12 @@ config SOF_BOOT_TEST
7777
initialized. After that SOF will continue running and be usable as
7878
usual.
7979

80+
config VIRTUAL_HEAP
	bool "Use virtual memory heap to allocate buffers"
	# Symbol is only visible on ACE (depends on ACE), where it defaults
	# to enabled; the implicit default is n elsewhere, so no explicit
	# "default n" line is needed.
	default y if ACE
	depends on ACE
	help
	  Enabling this option will use the virtual memory heap allocator to
	  allocate buffers. It is based on a set of buffers whose sizes are
	  predetermined.
8088
endif

zephyr/include/sof/lib/regions_mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
* either be spanned on specifically configured heap or have
3737
* individual configs with bigger block sizes.
3838
*/
39-
#define MAX_MEMORY_ALLOCATORS_COUNT 8
39+
#define MAX_MEMORY_ALLOCATORS_COUNT 10
4040

4141
/* vmh_get_default_heap_config() function will try to split the region
4242
* down the given count. Only applicable when API client did not

zephyr/lib/alloc.c

Lines changed: 105 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,13 @@
1818
#include <sof/trace/trace.h>
1919
#include <rtos/symbol.h>
2020
#include <rtos/wait.h>
21+
#if CONFIG_VIRTUAL_HEAP
22+
#include <sof/lib/regions_mm.h>
23+
24+
struct vmh_heap *virtual_buffers_heap;
25+
struct k_spinlock vmh_lock;
26+
#endif /* CONFIG_VIRTUAL_HEAP */
27+
2128

2229
/* Zephyr includes */
2330
#include <zephyr/init.h>
@@ -189,6 +196,91 @@ static void l3_heap_free(struct k_heap *h, void *mem)
189196

190197
#endif
191198

199+
#if CONFIG_VIRTUAL_HEAP
200+
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
201+
uint32_t align)
202+
{
203+
void *mem;
204+
205+
K_SPINLOCK(&vmh_lock) {
206+
heap->core_id = cpu_get_id();
207+
mem = vmh_alloc(heap, bytes);
208+
}
209+
210+
if (!mem)
211+
return NULL;
212+
213+
assert(IS_ALIGNED(mem, align));
214+
215+
if (flags & SOF_MEM_FLAG_COHERENT)
216+
return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);
217+
218+
return mem;
219+
}
220+
221+
/**
222+
* Checks whether pointer is from virtual memory range.
223+
* @param ptr Pointer to memory being checked.
224+
* @return True if pointer falls into virtual memory region, false otherwise.
225+
*/
226+
static bool is_virtual_heap_pointer(void *ptr)
227+
{
228+
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
229+
HEAPMEM_SIZE;
230+
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;
231+
232+
if (!is_cached(ptr))
233+
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
234+
235+
return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
236+
(POINTER_TO_UINT(ptr) < virtual_heap_end));
237+
}
238+
239+
static void virtual_heap_free(void *ptr)
240+
{
241+
int ret;
242+
243+
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
244+
245+
K_SPINLOCK(&vmh_lock) {
246+
virtual_buffers_heap->core_id = cpu_get_id();
247+
ret = vmh_free(virtual_buffers_heap, ptr);
248+
}
249+
250+
if (ret)
251+
tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
252+
}
253+
254+
/* Static pool configuration for the virtual buffers heap.
 * Each entry appears to be { block_size_bytes, block_count } — TODO confirm
 * against struct vmh_heap_config in sof/lib/regions_mm.h.
 */
static const struct vmh_heap_config static_hp_buffers = {
	{
		{ 128, 32},
		{ 512, 8},
		{ 1024, 44},
		{ 2048, 8},
		{ 4096, 11},
		{ 8192, 10},
		{ 65536, 3},
		{ 131072, 1},
		{ 524288, 1} /* buffer for kpb */
	},
};
267+
268+
/**
 * Boot-time initializer for the virtual buffers heap.
 *
 * Runs at POST_KERNEL priority 1 via SYS_INIT. Always returns 0: on
 * vmh_init_heap() failure the error is only logged and virtual_buffers_heap
 * stays NULL, so rballoc_align() silently falls back to the system heap
 * rather than failing the boot.
 */
static int virtual_heap_init(void)
{
	k_spinlock_init(&vmh_lock);

	virtual_buffers_heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_SHARED_HEAP, 0,
					     false);
	if (!virtual_buffers_heap)
		tr_err(&zephyr_tr, "Unable to init virtual buffers heap!");

	return 0;
}

SYS_INIT(virtual_heap_init, POST_KERNEL, 1);
281+
282+
#endif /* CONFIG_VIRTUAL_HEAP */
283+
192284
static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
193285
{
194286
k_spinlock_key_t key;
@@ -395,6 +487,12 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
395487
heap = &sof_heap;
396488
}
397489

490+
#if CONFIG_VIRTUAL_HEAP
491+
/* Use virtual heap if it is available */
492+
if (virtual_buffers_heap)
493+
return virtual_heap_alloc(virtual_buffers_heap, flags, caps, bytes, align);
494+
#endif /* CONFIG_VIRTUAL_HEAP */
495+
398496
if (flags & SOF_MEM_FLAG_COHERENT)
399497
return heap_alloc_aligned(heap, align, bytes);
400498

@@ -417,6 +515,13 @@ void rfree(void *ptr)
417515
}
418516
#endif
419517

518+
#if CONFIG_VIRTUAL_HEAP
519+
if (is_virtual_heap_pointer(ptr)) {
520+
virtual_heap_free(ptr);
521+
return;
522+
}
523+
#endif
524+
420525
heap_free(&sof_heap, ptr);
421526
}
422527
EXPORT_SYMBOL(rfree);
@@ -428,7 +533,6 @@ static int heap_init(void)
428533
#if CONFIG_L3_HEAP
429534
sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
430535
#endif
431-
432536
return 0;
433537
}
434538

0 commit comments

Comments
 (0)