regions_mm: get_mapped_size
Signed-off-by: Adrian Warecki <[email protected]>
softwarecki committed Sep 13, 2024
1 parent c9aeb44 commit 72faaf3
Showing 1 changed file with 54 additions and 5 deletions.
59 changes: 54 additions & 5 deletions zephyr/lib/regions_mm.c
@@ -279,6 +279,44 @@ static bool vmh_get_map_region_boundaries(struct sys_mem_blocks *blocks, const v
return true;
}

+ /**
+ * @brief Determine the size of the mapped memory region.
+ *
+ * This function calculates the size of a mapped memory region starting from the given address.
+ * It uses a binary search algorithm to find the boundary of the mapped region by checking if
+ * pages are mapped or not.
+ *
+ * @param addr Starting address of the memory region.
+ * @param size Pointer to the size of the memory region. This value will be updated to reflect
+ *             the size of the mapped region.
+ *
+ * @retval None
+ */
+ static void vmh_get_mapped_size(void *addr, size_t *size)
+ {
+ int ret;
+ uintptr_t check, unused;
+ uintptr_t left, right;
+
+ if (*size <= CONFIG_MM_DRV_PAGE_SIZE)
+ 	return;
+
+ left = (POINTER_TO_UINT(addr));
+ right = left + *size;
+ check = right - CONFIG_MM_DRV_PAGE_SIZE;
+ while (right - left > CONFIG_MM_DRV_PAGE_SIZE) {
+ 	ret = sys_mm_drv_page_phys_get(UINT_TO_POINTER(check), &unused);
+ 	if (!ret) {
+ 		left = check; /* Page is mapped */
+ 	} else {
+ 		right = check; /* Page is unmapped */
+ 	}
+ 	check = ALIGN_DOWN(left / 2 + right / 2, CONFIG_MM_DRV_PAGE_SIZE);
+ }
+
+ *size = right - POINTER_TO_UINT(addr);
+ }
+
/**
* @brief Maps memory pages for a memory region if they have not been previously mapped for other
* allocations.
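
For readers skimming the diff, a minimal self-contained model of the binary search may help. The sketch below is editorial, not part of the commit: it assumes 4 KiB pages and swaps sys_mm_drv_page_phys_get() for a hypothetical is_page_mapped() lookup over a fake page table in which the mapped pages form a contiguous prefix of the region (the property the binary search relies on); the real function queries Zephyr's memory-management driver instead.

/* Illustrative model of the binary search above; not part of the commit. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN_DOWN(x, align) ((x) & ~((uintptr_t)(align) - 1))

/* Hypothetical stand-in for sys_mm_drv_page_phys_get(). */
static bool is_page_mapped(uintptr_t addr, uintptr_t base, size_t mapped_pages)
{
	return (addr - base) / PAGE_SIZE < mapped_pages;
}

static size_t mapped_size(uintptr_t base, size_t size, size_t mapped_pages)
{
	uintptr_t left = base;		/* lower search bound */
	uintptr_t right = base + size;	/* upper search bound (exclusive) */
	uintptr_t check = right - PAGE_SIZE;

	if (size <= PAGE_SIZE)
		return size;

	while (right - left > PAGE_SIZE) {
		if (is_page_mapped(check, base, mapped_pages))
			left = check;	/* boundary lies above check */
		else
			right = check;	/* boundary lies at or below check */
		/* left / 2 + right / 2 avoids overflowing left + right */
		check = ALIGN_DOWN(left / 2 + right / 2, PAGE_SIZE);
	}

	return right - base;
}

int main(void)
{
	/* Eight-page region, first three pages mapped: prints 12288 (3 pages). */
	printf("%zu\n", mapped_size(0x100000, 8 * PAGE_SIZE, 3));
	return 0;
}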
@@ -326,8 +364,11 @@ static int vmh_unmap_region(struct sys_mem_blocks *region, void *ptr, size_t siz
const size_t block_size = 1 << region->info.blk_sz_shift;
uintptr_t begin;

- if (block_size >= CONFIG_MM_DRV_PAGE_SIZE)
- 	return sys_mm_drv_unmap_region(ptr, ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE));
+ if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) {
+ 	size = ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE);
+ 	vmh_get_mapped_size(ptr, &size);
+ 	return sys_mm_drv_unmap_region(ptr, size);
+ }

if (vmh_get_map_region_boundaries(region, ptr, size, &begin, &size))
return sys_mm_drv_unmap_region((void *)begin, size);
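
An editorial aside, not from the commit: a worked example of the clamped path above, assuming a 4 KiB page size and hypothetical numbers.

/* Hypothetical numbers, 4 KiB pages:
 *   free 6000 B at ptr  ->  ALIGN_UP(6000, 4096) = 8192 (two pages)
 *   vmh_get_mapped_size(ptr, &size): second page not mapped -> size = 4096
 *   sys_mm_drv_unmap_region(ptr, 4096): unmaps only the page actually
 *   present, instead of possibly failing on the already-unmapped one.
 */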
@@ -515,6 +556,7 @@ int vmh_free_heap(struct vmh_heap *heap)
* @retval 0 on success;
* @retval -ENOTEMPTY on heap having active allocations.
*/
+ int vmh_error;
int vmh_free(struct vmh_heap *heap, void *ptr)
{
int retval;
@@ -620,11 +662,18 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
/* Platforms based on xtensa have a non-coherent cache between cores. Before releasing
* a memory block, it is necessary to invalidate the cache. This memory block can be
* allocated by another core and performing cache writeback by the previous owner will
- * destroy current content of the main memory.
+ * destroy current content of the main memory. The cache is invalidated by the
+ * sys_mm_drv_unmap_region function, when a memory page is unmapped. There is no need to
+ * invalidate it when releasing buffers of at least a page in size.
*/
- sys_cache_data_invd_range(ptr, size_to_free);
- return vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
+ if (size_to_free < CONFIG_MM_DRV_PAGE_SIZE)
+ 	sys_cache_data_invd_range(ptr, size_to_free);
+ int ret;
+ ret = vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
size_to_free);
+ if (ret)
+ 	vmh_error = ret;
+ return ret;
}
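
As a hedged illustration of the two paths above (hypothetical sizes, assuming 4 KiB pages and that the driver's unmap invalidates the cache, as the commit's comment states):

/* Illustrative only, not part of the commit. Assuming 4 KiB pages:
 * - freeing a 256 B block: its page stays mapped (it may still hold other
 *   allocations), so the data cache must be invalidated here before the
 *   block can be reused by another core;
 * - freeing a 16 KiB buffer: its pages are unmapped by vmh_unmap_region(),
 *   and sys_mm_drv_unmap_region() already invalidates the cache during
 *   unmap, so a second invalidation would be redundant.
 */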

/**