From 72faaf32a44264e42635784cdff58b672d3c68fa Mon Sep 17 00:00:00 2001
From: Adrian Warecki
Date: Tue, 10 Sep 2024 17:23:02 +0200
Subject: [PATCH] regions_mm: get_mapped_size

Signed-off-by: Adrian Warecki
---
 zephyr/lib/regions_mm.c | 59 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 54 insertions(+), 5 deletions(-)

diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c
index b8a73eb1b370..3285853da9fd 100644
--- a/zephyr/lib/regions_mm.c
+++ b/zephyr/lib/regions_mm.c
@@ -279,6 +279,44 @@ static bool vmh_get_map_region_boundaries(struct sys_mem_blocks *blocks, const v
 	return true;
 }
 
+/**
+ * @brief Determine the size of the mapped memory region.
+ *
+ * This function calculates the size of a mapped memory region starting from the given address.
+ * It uses a binary search algorithm to find the boundary of the mapped region by checking if
+ * pages are mapped or not.
+ *
+ * @param addr Starting address of the memory region.
+ * @param size Pointer to the size of the memory region. This value will be updated to reflect
+ *             the size of the mapped region.
+ *
+ * @retval None
+ */
+static void vmh_get_mapped_size(void *addr, size_t *size)
+{
+	int ret;
+	uintptr_t check, unused;
+	uintptr_t left, right;
+
+	if (*size <= CONFIG_MM_DRV_PAGE_SIZE)
+		return;
+
+	left = (POINTER_TO_UINT(addr));
+	right = left + *size;
+	check = right - CONFIG_MM_DRV_PAGE_SIZE;
+	while (right - left > CONFIG_MM_DRV_PAGE_SIZE) {
+		ret = sys_mm_drv_page_phys_get(UINT_TO_POINTER(check), &unused);
+		if (!ret) {
+			left = check;	/* Page is mapped */
+		} else {
+			right = check;	/* Page is unmapped */
+		}
+		check = ALIGN_DOWN(left / 2 + right / 2, CONFIG_MM_DRV_PAGE_SIZE);
+	}
+
+	*size = right - POINTER_TO_UINT(addr);
+}
+
 /**
  * @brief Maps memory pages for a memory region if they have not been previously mapped for other
  *        allocations.
@@ -326,8 +364,11 @@ static int vmh_unmap_region(struct sys_mem_blocks *region, void *ptr, size_t siz
 	const size_t block_size = 1 << region->info.blk_sz_shift;
 	uintptr_t begin;
 
-	if (block_size >= CONFIG_MM_DRV_PAGE_SIZE)
-		return sys_mm_drv_unmap_region(ptr, ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE));
+	if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) {
+		size = ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE);
+		vmh_get_mapped_size(ptr, &size);
+		return sys_mm_drv_unmap_region(ptr, size);
+	}
 
 	if (vmh_get_map_region_boundaries(region, ptr, size, &begin, &size))
 		return sys_mm_drv_unmap_region((void *)begin, size);
@@ -515,6 +556,7 @@ int vmh_free_heap(struct vmh_heap *heap)
  * @retval 0 on success;
  * @retval -ENOTEMPTY on heap having active allocations.
  */
+int vmh_error;
 int vmh_free(struct vmh_heap *heap, void *ptr)
 {
 	int retval;
@@ -620,11 +662,18 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	/* Platforms based on xtensa have a non-coherent cache between cores. Before releasing
 	 * a memory block, it is necessary to invalidate the cache. This memory block can be
 	 * allocated by another core and performing cache writeback by the previous owner will
-	 * destroy current content of the main memory.
+	 * destroy current content of the main memory. The cache is invalidated by the
+	 * sys_mm_drv_unmap_region function when a memory page is unmapped. There is no need to
+	 * invalidate it when releasing buffers of at least a page in size.
 	 */
-	sys_cache_data_invd_range(ptr, size_to_free);
-	return vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
+	if (size_to_free < CONFIG_MM_DRV_PAGE_SIZE)
+		sys_cache_data_invd_range(ptr, size_to_free);
+	int ret;
+	ret = vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
 				size_to_free);
+	if (ret)
+		vmh_error = ret;
+	return ret;
 }
 
 /**
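
Note: the stand-alone sketch below is not part of the patch; it only illustrates how the binary
search in vmh_get_mapped_size() converges on the size of the mapped region, assuming (as the heap
does) that mappings are contiguous from the start of the buffer. PAGE_SIZE, ALIGN_DOWN(),
page_is_mapped(), region_base and get_mapped_size() are stand-ins defined only for this example;
in the patch the page check is done with sys_mm_drv_page_phys_get() and the page size is
CONFIG_MM_DRV_PAGE_SIZE.

/*
 * Host-side illustration of the binary search used by vmh_get_mapped_size().
 * 'left' always points at a page assumed/known to be mapped, 'right' at an
 * unmapped page (or one past the end of the requested range); the interval is
 * halved each iteration until it shrinks to a single page.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE		4096u
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

/* Pretend only the first 3 pages of the region are mapped. */
static uintptr_t region_base;

static int page_is_mapped(uintptr_t va)
{
	return va < region_base + 3 * PAGE_SIZE;
}

static void get_mapped_size(uintptr_t addr, size_t *size)
{
	uintptr_t left, right, check;

	if (*size <= PAGE_SIZE)
		return;

	left = addr;
	right = left + *size;
	check = right - PAGE_SIZE;
	while (right - left > PAGE_SIZE) {
		if (page_is_mapped(check))
			left = check;	/* Page is mapped */
		else
			right = check;	/* Page is unmapped */
		check = ALIGN_DOWN(left / 2 + right / 2, PAGE_SIZE);
	}

	*size = right - addr;
}

int main(void)
{
	size_t size = 8 * PAGE_SIZE;	/* requested unmap size: 8 pages */

	region_base = 0x100000;
	get_mapped_size(region_base, &size);
	/* Prints 12288 bytes (3 pages): only the mapped part gets unmapped. */
	printf("mapped size: %zu bytes (%zu pages)\n", size, size / PAGE_SIZE);
	return 0;
}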