From d65bf8a97fb1039f080f463bcf9931e9fa158d85 Mon Sep 17 00:00:00 2001 From: Christopher Friedt Date: Mon, 27 Jun 2022 23:43:32 -0400 Subject: [PATCH] kernel: support dynamic thread stack allocation Add support for dynamic thread stack allocation Signed-off-by: Christopher Friedt --- include/zephyr/kernel.h | 29 +++++++ kernel/CMakeLists.txt | 6 ++ kernel/Kconfig | 62 +++++++++++++++ kernel/dynamic.c | 162 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 259 insertions(+) create mode 100644 kernel/dynamic.c diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index d3a435a4d38aef..9a88baeb5bdf55 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -265,6 +265,35 @@ extern void k_thread_foreach_unlocked( /* end - thread options */ #if !defined(_ASMLANGUAGE) +/** + * @brief Dynamically allocate a thread stack. + * + * Relevant stack creation flags include: + * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`) + * + * @param size Stack size in bytes. + * @param flags Stack creation flags, or 0. + * + * @retval the allocated thread stack on success. + * @retval NULL on failure. + * + * @see CONFIG_DYNAMIC_THREAD + */ +__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags); + +/** + * @brief Free a dynamically allocated thread stack. + * + * @param stack Pointer to the thread stack. + * + * @retval 0 on success. + * @retval -EBUSY if the thread stack is in use. + * @retval -EINVAL if @p stack is invalid. + * + * @see CONFIG_DYNAMIC_THREAD + */ +__syscall int k_thread_stack_free(k_thread_stack_t *stack); + /** * @brief Create a thread. 
*
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 7b4781e4fd3377..82427e83ba880b 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -122,6 +122,12 @@ target_sources_ifdef(
   userspace.c
   )
 
+target_sources_ifdef(
+  CONFIG_DYNAMIC_THREAD
+  kernel PRIVATE
+  dynamic.c
+  )
+
 target_include_directories(kernel PRIVATE
   ${ZEPHYR_BASE}/kernel/include
   ${ARCH_DIR}/${ARCH}/include
diff --git a/kernel/Kconfig b/kernel/Kconfig
index e553553b1c740c..12cd7b8f836b66 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -203,6 +203,68 @@ config THREAD_USERSPACE_LOCAL_DATA
 	depends on USERSPACE
 	default y if ERRNO && !ERRNO_IN_TLS
 
+config DYNAMIC_THREAD
+	bool "Support for dynamic threads [EXPERIMENTAL]"
+	select EXPERIMENTAL
+	depends on THREAD_STACK_INFO
+	select DYNAMIC_OBJECTS if USERSPACE
+	help
+	  Enable support for dynamic threads and stacks.
+
+if DYNAMIC_THREAD
+
+config DYNAMIC_THREAD_STACK_SIZE
+	int "Size of each pre-allocated thread stack"
+	default 1024 if !64BIT
+	default 2048 if 64BIT
+	help
+	  Default stack size (in bytes) for dynamic threads.
+
+config DYNAMIC_THREAD_ALLOC
+	bool "Support heap-allocated thread objects and stacks"
+	help
+	  Select this option to enable allocating thread object and
+	  thread stacks from the system heap.
+
+	  Only use this type of allocation in situations
+	  where malloc is permitted.
+
+config DYNAMIC_THREAD_POOL_SIZE
+	int "Number of statically pre-allocated threads"
+	default 0
+	range 0 8192
+	help
+	  Pre-allocate a fixed number of thread objects and
+	  stacks at build time.
+
+	  This type of "dynamic" stack is usually suitable in
+	  situations where malloc is not permitted.
+
+choice DYNAMIC_THREAD_PREFER
+	prompt "Preferred dynamic thread allocator"
+	default DYNAMIC_THREAD_PREFER_POOL
+	help
+	  If both CONFIG_DYNAMIC_THREAD_ALLOC=y and
+	  CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0, then the user may
+	  specify the order in which allocation is attempted.
+ +config DYNAMIC_THREAD_PREFER_ALLOC + bool "Prefer heap-based allocation" + depends on DYNAMIC_THREAD_ALLOC + help + Select this option to attempt a heap-based allocation + prior to any pool-based allocation. + +config DYNAMIC_THREAD_PREFER_POOL + bool "Prefer pool-based allocation" + help + Select this option to attempt a pool-based allocation + prior to any heap-based allocation. + +endchoice # DYNAMIC_THREAD_PREFER + +endif # DYNAMIC_THREADS + config LIBC_ERRNO bool help diff --git a/kernel/dynamic.c b/kernel/dynamic.c new file mode 100644 index 00000000000000..6f9c1f6adcfefb --- /dev/null +++ b/kernel/dynamic.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2022, Meta + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "kernel_internal.h" + +#include +#include +#include +#include + +LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); + +#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 +#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE +#else +#define BA_SIZE 1 +#endif + +struct dyn_cb_data { + k_tid_t tid; + k_thread_stack_t *stack; +}; + +static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE, + CONFIG_DYNAMIC_THREAD_STACK_SIZE); +SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE); + +static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t align, size_t size) +{ + return z_thread_aligned_alloc(align, size); +} + +static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size) +{ + int rv; + size_t offset; + k_thread_stack_t *stack; + + if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) { + LOG_DBG("stack size %zu is > pool stack size %d", size, + CONFIG_DYNAMIC_THREAD_STACK_SIZE); + return NULL; + } + + rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset); + if (rv < 0) { + LOG_DBG("unable to allocate stack from pool"); + return NULL; + } + + __ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE); + + stack = (k_thread_stack_t *)&dynamic_stack[offset]; + + return stack; +} + +k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags) +{ + 
size_t align = 0; + size_t obj_size = 0; + k_thread_stack_t *stack = NULL; + +#ifdef CONFIG_USERSPACE + if ((flags & K_USER) != 0) { + align = Z_THREAD_STACK_OBJ_ALIGN(size); + obj_size = Z_THREAD_STACK_SIZE_ADJUST(size); + } else +#endif + { + align = Z_KERNEL_STACK_OBJ_ALIGN; + obj_size = Z_KERNEL_STACK_SIZE_ADJUST(size); + } + + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) { + stack = z_thread_stack_alloc_dyn(align, obj_size); + if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + stack = z_thread_stack_alloc_pool(size); + } + } else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL) && + CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + stack = z_thread_stack_alloc_pool(size); + if (stack == NULL && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + stack = z_thread_stack_alloc_dyn(align, obj_size); + } + } else { + return NULL; + } + + return stack; +} + +#ifdef CONFIG_USERSPACE +static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags) +{ + return z_impl_k_thread_stack_alloc(size, flags); +} +#include +#endif + +static void dyn_cb(const struct k_thread *thread, void *user_data) +{ + struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data; + + if (data->stack == (k_thread_stack_t *)thread->stack_info.start) { + __ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!"); + data->tid = (k_tid_t)thread; + } +} + +int z_impl_k_thread_stack_free(k_thread_stack_t *stack) +{ + char state_buf[16] = {0}; + struct dyn_cb_data data = {.stack = stack}; + + /* Get a possible tid associated with stack */ + k_thread_foreach(dyn_cb, &data); + + if (data.tid != NULL) { + /* Check if thread is in use */ + if (k_thread_state_str(data.tid, state_buf, sizeof(state_buf)) != state_buf) { + LOG_ERR("tid %p is invalid!", data.tid); + return -EINVAL; + } + + if (!(strcmp("dummy", state_buf) == 0) || (strcmp("dead", state_buf) == 0)) { + LOG_ERR("tid %p is in use!", data.tid); + return -EBUSY; + } + } + + if 
(CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) { + if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) { + if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) { + LOG_ERR("stack %p is not allocated!", stack); + return -EINVAL; + } + + return 0; + } + } + + if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) { + k_free(stack); + } else { + LOG_ERR("Invalid stack %p", stack); + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack) +{ + return z_impl_k_thread_stack_free(stack); +} +#include +#endif