kernel: userspace: Dynamic thread stack object

Add support for dynamic thread stack objects. A new container for this
kernel object was added to avoid imposing its alignment constraint on
all dynamic objects.

Signed-off-by: Flavio Ceolin <[email protected]>
Flavio Ceolin committed Jun 30, 2023
1 parent bbcfd0d commit 9d5ba55
Showing 1 changed file with 82 additions and 42 deletions.
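A minimal usage sketch for context (not part of this diff): how a caller might obtain a dynamic stack object once this change is in place. It assumes CONFIG_DYNAMIC_OBJECTS=y and that k_object_alloc_size() is exposed as the public wrapper for the z_impl_k_object_alloc_size() handler shown below; the function name and the 2048-byte size are purely illustrative.

#include <zephyr/kernel.h>

void example_alloc_dynamic_stack(void)
{
	/* z_object_alloc() rounds the requested size with
	 * Z_THREAD_STACK_SIZE_ADJUST() and dynamic_object_create() backs the
	 * object with a struct dyn_obj_stack, so only stack objects pay the
	 * Z_KERNEL_STACK_OBJ_ALIGN (typically page-size) alignment cost.
	 */
	k_thread_stack_t *stack =
		k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, 2048);

	if (stack == NULL) {
		return; /* out of heap, same failure path as other objects */
	}

	/* ... start a thread on this stack (e.g. with k_thread_create()),
	 * then release the object with k_object_free() once it is retired.
	 */
	k_object_free(stack);
}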
124 changes: 82 additions & 42 deletions kernel/userspace.c
@@ -133,15 +133,29 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
#define DYN_OBJ_DATA_ALIGN \
MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
struct dyn_obj_base {
struct z_object kobj;
sys_dnode_t dobj_list;
struct rbnode node; /* must be immediately before data member */
};

struct dyn_obj {
struct dyn_obj_base base;

/* The object itself */
uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
};

/* Thread stacks impose a very strict alignment. Using this alignment
 * (generally page size) for all objects would waste a lot of memory.
 */
struct dyn_obj_stack {
struct dyn_obj_base base;

/* The object itself */
uint8_t data[] __aligned(Z_KERNEL_STACK_OBJ_ALIGN);
};

extern struct z_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
void *context);
@@ -193,6 +207,9 @@ static size_t obj_align_get(enum k_objects otype)
ret = __alignof(struct dyn_obj);
#endif
break;
case K_OBJ_THREAD_STACK_ELEMENT:
ret = __alignof(struct dyn_obj_stack);
break;
default:
ret = __alignof(struct dyn_obj);
break;
Expand All @@ -206,36 +223,32 @@ static bool node_lessthan(struct rbnode *a, struct rbnode *b)
return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
return CONTAINER_OF(node, struct dyn_obj, node);
}

static inline struct rbnode *dyn_obj_to_node(void *obj)
{
struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);

return &dobj->node;
}

static struct dyn_obj *dyn_object_find(void *obj)
static struct dyn_obj_base *dyn_object_find(void *obj)
{
struct rbnode *node;
struct dyn_obj *ret;
struct dyn_obj_base *ret;
k_spinlock_key_t key;

/* For any dynamically allocated kernel object, the object
* pointer is just a member of the containing struct dyn_obj,
* so just a little arithmetic is necessary to locate the
* corresponding struct rbnode
*/
node = dyn_obj_to_node(obj);

k_spinlock_key_t key = k_spin_lock(&lists_lock);
if (rb_contains(&obj_rb_tree, node)) {
ret = node_to_dyn_obj(node);
} else {
ret = NULL;
key = k_spin_lock(&lists_lock);

RB_FOR_EACH(&obj_rb_tree, node) {
ret = CONTAINER_OF(node, struct dyn_obj_base, node);
if (((struct dyn_obj *)ret)->data == obj) {
goto end;
} else if (((struct dyn_obj_stack *)ret)->data == obj) {
goto end;
}
}

/* No object found */
ret = NULL;

end:
k_spin_unlock(&lists_lock, key);

return ret;
@@ -304,18 +317,34 @@ static void thread_idx_free(uintptr_t tidx)
sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}

struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
static struct z_object *dynamic_object_create(enum k_objects otype, size_t align,
size_t size)
{
struct dyn_obj *dyn;
struct dyn_obj_base *dyn;

dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
if (dyn == NULL) {
LOG_ERR("could not allocate kernel object, out of memory");
return NULL;
if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
struct dyn_obj_stack *stack;

stack = z_thread_aligned_alloc(align,
sizeof(struct dyn_obj_stack) + size);
if (stack == NULL) {
return NULL;
}
dyn = &stack->base;
dyn->kobj.name = &stack->data;
} else {
struct dyn_obj *obj;

obj = z_thread_aligned_alloc(align,
sizeof(struct dyn_obj) + size);
if (obj == NULL) {
return NULL;
}
dyn = &obj->base;
dyn->kobj.name = &obj->data;
}

dyn->kobj.name = &dyn->data;
dyn->kobj.type = K_OBJ_ANY;
dyn->kobj.type = otype;
dyn->kobj.flags = 0;
(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

@@ -326,6 +355,18 @@ struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
k_spin_unlock(&lists_lock, key);

return &dyn->kobj;

}

struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
struct z_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

if (obj == NULL) {
LOG_ERR("could not allocate kernel object, out of memory");
}

return obj;
}

static void *z_object_alloc(enum k_objects otype, size_t object_size)
@@ -345,11 +386,12 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
return NULL;
}
break;
case K_OBJ_THREAD_STACK_ELEMENT:
object_size = Z_THREAD_STACK_SIZE_ADJUST(object_size);
break;
/* The following are currently not allowed at all */
case K_OBJ_FUTEX: /* Lives in user memory */
case K_OBJ_SYS_MUTEX: /* Lives in user memory */
case K_OBJ_THREAD_STACK_ELEMENT:
break;
case K_OBJ_NET_SOCKET: /* Indeterminate size */
LOG_ERR("forbidden object type '%s' requested",
otype_to_str(otype));
@@ -359,15 +401,13 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
break;
}

zo = z_dynamic_object_aligned_create(obj_align_get(otype),
object_size);
zo = dynamic_object_create(otype, obj_align_get(otype), object_size);
if (zo == NULL) {
if (otype == K_OBJ_THREAD) {
thread_idx_free(tidx);
}
return NULL;
}
zo->type = otype;

if (otype == K_OBJ_THREAD) {
zo->data.thread_id = tidx;
@@ -398,7 +438,7 @@ void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)

void k_object_free(void *obj)
{
struct dyn_obj *dyn;
struct dyn_obj_base *dyn;

/* This function is intentionally not exposed to user mode.
* There's currently no robust way to track that an object isn't
@@ -430,15 +470,15 @@ struct z_object *z_object_find(const void *obj)
ret = z_object_gperf_find(obj);

if (ret == NULL) {
struct dyn_obj *dynamic_obj;
struct dyn_obj_base *dyn;

/* The cast to pointer-to-non-const violates MISRA
* 11.8 but is justified since we know dynamic objects
* were not declared with a const qualifier.
*/
dynamic_obj = dyn_object_find((void *)obj);
if (dynamic_obj != NULL) {
ret = &dynamic_obj->kobj;
dyn = dyn_object_find((void *)obj);
if (dyn != NULL) {
ret = &dyn->kobj;
}
}

@@ -447,7 +487,7 @@ struct z_object *z_object_find(const void *obj)

void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
struct dyn_obj *obj, *next;
struct dyn_obj_base *obj, *next;

z_object_gperf_wordlist_foreach(func, context);

@@ -487,7 +527,7 @@ static void unref_check(struct z_object *ko, uintptr_t index)

void *vko = ko;

struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);
struct dyn_obj_base *dyn = CONTAINER_OF(vko, struct dyn_obj_base, kobj);

__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

