Skip to content

Commit

Permalink
ace: zephyr: alloc: Use virtual memory heap for buffers
Browse files Browse the repository at this point in the history
The buffer allocation method for the ace platform has been changed. Buffers
are now allocated using the virtual memory heap, which consists of a set of
buffers with predefined sizes.

Signed-off-by: Adrian Warecki <[email protected]>
  • Loading branch information
softwarecki committed Jul 9, 2024
1 parent 1b45a41 commit c6532b7
Show file tree
Hide file tree
Showing 3 changed files with 119 additions and 2 deletions.
8 changes: 8 additions & 0 deletions zephyr/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -77,4 +77,12 @@ config SOF_BOOT_TEST
initialized. After that SOF will continue running and be usable as
usual.

config VIRTUAL_HEAP
bool "Use virtual memory heap to allocate buffers"
default y if ACE
default n
depends on ACE
help
Enabling this option will use the virtual memory heap allocator to allocate buffers.
It is based on a set of buffers whose size is predetermined.
endif
2 changes: 1 addition & 1 deletion zephyr/include/sof/lib/regions_mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
* either be spanned on specifically configured heap or have
* individual configs with bigger block sizes.
*/
#define MAX_MEMORY_ALLOCATORS_COUNT 8
#define MAX_MEMORY_ALLOCATORS_COUNT 10

/* vmh_get_default_heap_config() function will try to split the region
* down the given count. Only applicable when API client did not
Expand Down
111 changes: 110 additions & 1 deletion zephyr/lib/alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,18 @@
#include <sof/trace/trace.h>
#include <rtos/symbol.h>
#include <rtos/wait.h>
#if CONFIG_VIRTUAL_HEAP
#include <sof/lib/regions_mm.h>

struct vmh_heap *virtual_buffers_heap;
struct k_spinlock vmh_lock;

#undef HEAPMEM_SIZE
/* Buffers are allocated from virtual space so we can safely reduce the heap size.
*/
#define HEAPMEM_SIZE 0x20000
#endif /* CONFIG_VIRTUAL_HEAP */


/* Zephyr includes */
#include <zephyr/init.h>
Expand Down Expand Up @@ -189,6 +201,91 @@ static void l3_heap_free(struct k_heap *h, void *mem)

#endif

#if CONFIG_VIRTUAL_HEAP
/**
 * Allocates a buffer from the virtual memory heap.
 * @param heap Virtual heap to allocate from.
 * @param flags SOF memory flags; SOF_MEM_FLAG_COHERENT requests an uncached alias.
 * @param caps Capability mask; currently unused by this allocator.
 * @param bytes Requested allocation size in bytes.
 * @param align Required alignment of the returned pointer.
 * @return Pointer to the allocated buffer, or NULL on failure.
 */
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
				uint32_t align)
{
	void *mem;

	K_SPINLOCK(&vmh_lock) {
		heap->core_id = cpu_get_id();
		mem = vmh_alloc(heap, bytes);
	}

	if (!mem)
		return NULL;

	/* vmh_alloc() takes no alignment argument, so the returned block's alignment
	 * cannot be guaranteed up front. An assert() alone compiles out in release
	 * builds and would silently hand out a misaligned buffer, therefore free the
	 * block and fail the allocation when the requirement is not met.
	 */
	if (align && !IS_ALIGNED(mem, align)) {
		int err;

		K_SPINLOCK(&vmh_lock) {
			heap->core_id = cpu_get_id();
			err = vmh_free(heap, mem);
		}
		if (err)
			tr_err(&zephyr_tr, "Unable to free %p! %d", mem, err);
		tr_err(&zephyr_tr, "Virtual heap cannot satisfy alignment %u", align);
		return NULL;
	}

	/* Coherent allocations are handed out through the uncached alias. */
	if (flags & SOF_MEM_FLAG_COHERENT)
		return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);

	return mem;
}

/**
* Checks whether pointer is from virtual memory range.
* @param ptr Pointer to memory being checked.
* @return True if pointer falls into virtual memory region, false otherwise.
*/
static bool is_virtual_heap_pointer(void *ptr)
{
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
HEAPMEM_SIZE;
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;

if (!is_cached(ptr))
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
(POINTER_TO_UINT(ptr) < virtual_heap_end));
}

static void virtual_heap_free(void *ptr)
{
int ret;

ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

K_SPINLOCK(&vmh_lock) {
virtual_buffers_heap->core_id = cpu_get_id();
ret = vmh_free(virtual_buffers_heap, ptr);
}

if (ret)
tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
}

/* Static pool layout for the virtual buffers heap.
 * NOTE(review): each entry appears to be { block_size, block_count } — confirm
 * against the definition of struct vmh_heap_config in regions_mm.h. The entry
 * count must stay within MAX_MEMORY_ALLOCATORS_COUNT.
 */
static const struct vmh_heap_config static_hp_buffers = {
	{
		{ 128, 32},
		{ 512, 8},
		{ 1024, 44},
		{ 2048, 8},
		{ 4096, 11},
		{ 8192, 10},
		{ 65536, 3},
		{ 131072, 1},
		{ 524288, 1} /* buffer for kpb */
	},
};

/**
 * Boot-time initialization of the virtual buffers heap.
 *
 * Always returns 0, even when heap creation fails: in that case
 * virtual_buffers_heap stays NULL and rballoc_align() falls back to the
 * regular heap, so boot is not aborted — the failure is only logged.
 */
static int virtual_heap_init(void)
{
	k_spinlock_init(&vmh_lock);

	virtual_buffers_heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_SHARED_HEAP, 0,
					     false);
	if (!virtual_buffers_heap)
		tr_err(&zephyr_tr, "Unable to init virtual buffers heap!");

	return 0;
}

/* Run after the kernel is up; priority 1 keeps this early in POST_KERNEL. */
SYS_INIT(virtual_heap_init, POST_KERNEL, 1);

#endif /* CONFIG_VIRTUAL_HEAP */

static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
{
k_spinlock_key_t key;
Expand Down Expand Up @@ -395,6 +492,12 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
heap = &sof_heap;
}

#if CONFIG_VIRTUAL_HEAP
/* Use virtual heap if it is available */
if (virtual_buffers_heap)
return virtual_heap_alloc(virtual_buffers_heap, flags, caps, bytes, align);
#endif /* CONFIG_VIRTUAL_HEAP */

if (flags & SOF_MEM_FLAG_COHERENT)
return heap_alloc_aligned(heap, align, bytes);

Expand All @@ -417,6 +520,13 @@ void rfree(void *ptr)
}
#endif

#if CONFIG_VIRTUAL_HEAP
if (is_virtual_heap_pointer(ptr)) {
virtual_heap_free(ptr);
return;
}
#endif

heap_free(&sof_heap, ptr);
}
EXPORT_SYMBOL(rfree);
Expand All @@ -428,7 +538,6 @@ static int heap_init(void)
#if CONFIG_L3_HEAP
sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
#endif

return 0;
}

Expand Down

0 comments on commit c6532b7

Please sign in to comment.