diff --git a/lib/posix/options/key.c b/lib/posix/options/key.c index a7906b8d9f7ce5..f49198d173c81b 100644 --- a/lib/posix/options/key.c +++ b/lib/posix/options/key.c @@ -11,6 +11,7 @@ #include #include #include +#include struct pthread_key_data { sys_snode_t node; @@ -19,7 +20,7 @@ struct pthread_key_data { LOG_MODULE_REGISTER(pthread_key, CONFIG_PTHREAD_KEY_LOG_LEVEL); -static struct k_spinlock pthread_key_lock; +static SYS_SEM_DEFINE(pthread_key_lock, 1, 1); /* This is non-standard (i.e. an implementation detail) */ #define PTHREAD_KEY_INITIALIZER (-1) @@ -128,42 +129,40 @@ int pthread_key_create(pthread_key_t *key, int pthread_key_delete(pthread_key_t key) { size_t bit; - __unused int ret; - pthread_key_obj *key_obj; + int ret = EINVAL; + pthread_key_obj *key_obj = NULL; struct pthread_key_data *key_data; sys_snode_t *node_l, *next_node_l; - k_spinlock_key_t key_key; - key_key = k_spin_lock(&pthread_key_lock); + SYS_SEM_LOCK(&pthread_key_lock) { + key_obj = get_posix_key(key); + if (key_obj == NULL) { + ret = EINVAL; + SYS_SEM_LOCK_BREAK; + } - key_obj = get_posix_key(key); - if (key_obj == NULL) { - k_spin_unlock(&pthread_key_lock, key_key); - return EINVAL; - } + /* Delete thread-specific elements associated with the key */ + SYS_SLIST_FOR_EACH_NODE_SAFE(&(key_obj->key_data_l), node_l, next_node_l) { - /* Delete thread-specific elements associated with the key */ - SYS_SLIST_FOR_EACH_NODE_SAFE(&(key_obj->key_data_l), - node_l, next_node_l) { + /* Remove the object from the list key_data_l */ + key_data = (struct pthread_key_data *)sys_slist_get(&(key_obj->key_data_l)); - /* Remove the object from the list key_data_l */ - key_data = (struct pthread_key_data *) - sys_slist_get(&(key_obj->key_data_l)); + /* Deallocate the object's memory */ + k_free((void *)key_data); + LOG_DBG("Freed key data %p for key %x in thread %x", key_data, key, + pthread_self()); + } - /* Deallocate the object's memory */ - k_free((void *)key_data); - LOG_DBG("Freed key data %p for key 
%x in thread %x", key_data, key, pthread_self()); + bit = posix_key_to_offset(key_obj); + ret = sys_bitarray_free(&posix_key_bitarray, 1, bit); + __ASSERT_NO_MSG(ret == 0); } - bit = posix_key_to_offset(key_obj); - ret = sys_bitarray_free(&posix_key_bitarray, 1, bit); - __ASSERT_NO_MSG(ret == 0); - - k_spin_unlock(&pthread_key_lock, key_key); - - LOG_DBG("Deleted key %p (%x)", key_obj, key); + if (ret == 0) { + LOG_DBG("Deleted key %p (%x)", key_obj, key); + } - return 0; + return ret; } /** @@ -173,12 +172,10 @@ int pthread_key_delete(pthread_key_t key) */ int pthread_setspecific(pthread_key_t key, const void *value) { - pthread_key_obj *key_obj; + pthread_key_obj *key_obj = NULL; struct posix_thread *thread; struct pthread_key_data *key_data; - pthread_thread_data *thread_spec_data; - k_spinlock_key_t key_key; - sys_snode_t *node_l; + sys_snode_t *node_l = NULL; int retval = 0; thread = to_posix_thread(pthread_self()); @@ -190,37 +187,38 @@ int pthread_setspecific(pthread_key_t key, const void *value) * If the key is already in the list, re-assign its value. * Else add the key to the thread's list. 
*/ - key_key = k_spin_lock(&pthread_key_lock); - - key_obj = get_posix_key(key); - if (key_obj == NULL) { - k_spin_unlock(&pthread_key_lock, key_key); - return EINVAL; - } - - SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) { + SYS_SEM_LOCK(&pthread_key_lock) { + key_obj = get_posix_key(key); + if (key_obj == NULL) { + retval = EINVAL; + SYS_SEM_LOCK_BREAK; + } - thread_spec_data = (pthread_thread_data *)node_l; + SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) { + pthread_thread_data *thread_spec_data = (pthread_thread_data *)node_l; - if (thread_spec_data->key == key_obj) { + if (thread_spec_data->key == key_obj) { + /* Key is already present so associate thread specific data */ + thread_spec_data->spec_data = (void *)value; + LOG_DBG("Paired key %x to value %p for thread %x", key, value, + pthread_self()); + break; + } + } - /* Key is already present so - * associate thread specific data - */ - thread_spec_data->spec_data = (void *)value; - LOG_DBG("Paired key %x to value %p for thread %x", key, value, - pthread_self()); - goto out; + retval = 0; + if (node_l != NULL) { + /* Key is already present, so we are done */ + SYS_SEM_LOCK_BREAK; } - } - if (node_l == NULL) { + /* Key and data need to be added */ key_data = k_malloc(sizeof(struct pthread_key_data)); if (key_data == NULL) { LOG_DBG("Failed to allocate key data for key %x", key); retval = ENOMEM; - goto out; + SYS_SEM_LOCK_BREAK; } LOG_DBG("Allocated key data %p for key %x in thread %x", key_data, key, @@ -239,9 +237,6 @@ int pthread_setspecific(pthread_key_t key, const void *value) LOG_DBG("Paired key %x to value %p for thread %x", key, value, pthread_self()); } -out: - k_spin_unlock(&pthread_key_lock, key_key); - return retval; } @@ -257,33 +252,30 @@ void *pthread_getspecific(pthread_key_t key) pthread_thread_data *thread_spec_data; void *value = NULL; sys_snode_t *node_l; - k_spinlock_key_t key_key; thread = to_posix_thread(pthread_self()); if (thread == NULL) { return NULL; } - key_key = 
k_spin_lock(&pthread_key_lock); - - key_obj = get_posix_key(key); - if (key_obj == NULL) { - k_spin_unlock(&pthread_key_lock, key_key); - return NULL; - } + SYS_SEM_LOCK(&pthread_key_lock) { + key_obj = get_posix_key(key); + if (key_obj == NULL) { + value = NULL; + SYS_SEM_LOCK_BREAK; + } - /* Traverse the list of keys set by the thread, looking for key */ + /* Traverse the list of keys set by the thread, looking for key */ - SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) { - thread_spec_data = (pthread_thread_data *)node_l; - if (thread_spec_data->key == key_obj) { - /* Key is present, so get the set thread data */ - value = thread_spec_data->spec_data; - break; + SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) { + thread_spec_data = (pthread_thread_data *)node_l; + if (thread_spec_data->key == key_obj) { + /* Key is present, so get the set thread data */ + value = thread_spec_data->spec_data; + break; + } } } - k_spin_unlock(&pthread_key_lock, key_key); - return value; } diff --git a/lib/posix/options/mutex.c b/lib/posix/options/mutex.c index ff3f3561f6d645..cd320a969dca8f 100644 --- a/lib/posix/options/mutex.c +++ b/lib/posix/options/mutex.c @@ -12,10 +12,11 @@ #include #include #include +#include LOG_MODULE_REGISTER(pthread_mutex, CONFIG_PTHREAD_MUTEX_LOG_LEVEL); -static struct k_spinlock pthread_mutex_spinlock; +static SYS_SEM_DEFINE(lock, 1, 1); int64_t timespec_to_timeoutms(const struct timespec *abstime); @@ -106,35 +107,42 @@ struct k_mutex *to_posix_mutex(pthread_mutex_t *mu) static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout) { - int type; - size_t bit; - int ret = 0; - struct k_mutex *m; - k_spinlock_key_t key; + int type = -1; + size_t bit = -1; + int ret = EINVAL; + size_t lock_count = -1; + struct k_mutex *m = NULL; + struct k_thread *owner = NULL; + + SYS_SEM_LOCK(&lock) { + m = to_posix_mutex(mu); + if (m == NULL) { + ret = EINVAL; + SYS_SEM_LOCK_BREAK; + } - key = k_spin_lock(&pthread_mutex_spinlock); + LOG_DBG("Locking 
mutex %p with timeout %llx", m, timeout.ticks); - m = to_posix_mutex(mu); - if (m == NULL) { - k_spin_unlock(&pthread_mutex_spinlock, key); - return EINVAL; + ret = 0; + bit = posix_mutex_to_offset(m); + type = posix_mutex_type[bit]; + owner = m->owner; + lock_count = m->lock_count; } - LOG_DBG("Locking mutex %p with timeout %llx", m, timeout.ticks); - - bit = posix_mutex_to_offset(m); - type = posix_mutex_type[bit]; + if (ret != 0) { + goto handle_error; + } - if (m->owner == k_current_get()) { + if (owner == k_current_get()) { switch (type) { case PTHREAD_MUTEX_NORMAL: if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - k_spin_unlock(&pthread_mutex_spinlock, key); LOG_DBG("Timeout locking mutex %p", m); - return EBUSY; + ret = EBUSY; + break; } /* On most POSIX systems, this usually results in an infinite loop */ - k_spin_unlock(&pthread_mutex_spinlock, key); LOG_DBG("Attempt to relock non-recursive mutex %p", m); do { (void)k_sleep(K_FOREVER); @@ -142,7 +150,7 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout) CODE_UNREACHABLE; break; case PTHREAD_MUTEX_RECURSIVE: - if (m->lock_count >= MUTEX_MAX_REC_LOCK) { + if (lock_count >= MUTEX_MAX_REC_LOCK) { LOG_DBG("Mutex %p locked recursively too many times", m); ret = EAGAIN; } @@ -157,7 +165,6 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout) break; } } - k_spin_unlock(&pthread_mutex_spinlock, key); if (ret == 0) { ret = k_mutex_lock(m, timeout); @@ -171,6 +178,7 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout) } } +handle_error: if (ret < 0) { LOG_DBG("k_mutex_unlock() failed: %d", ret); ret = -ret; diff --git a/lib/posix/options/pthread.c b/lib/posix/options/pthread.c index fe3c606bdb88ad..e3f8ebf0910d1c 100644 --- a/lib/posix/options/pthread.c +++ b/lib/posix/options/pthread.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -86,8 +87,8 @@ static sys_dlist_t posix_thread_q[] = { SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_RUN_Q]), 
SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]), }; -static struct posix_thread posix_thread_pool[CONFIG_POSIX_THREAD_THREADS_MAX]; -static struct k_spinlock pthread_pool_lock; +static struct posix_thread posix_thread_pool[CONFIG_POSIX_THREAD_THREADS_MAX]; +static SYS_SEM_DEFINE(pthread_pool_lock, 1, 1); static int pthread_concurrency; static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid) @@ -203,7 +204,7 @@ void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void struct posix_thread *t = NULL; struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c)); __ASSERT_NO_MSG(t != NULL); @@ -220,7 +221,7 @@ void __z_pthread_cleanup_pop(int execute) struct __pthread_cleanup *c = NULL; struct posix_thread *t = NULL; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); __ASSERT_NO_MSG(t != NULL); node = sys_slist_get(&t->cleanup_list); @@ -460,7 +461,6 @@ static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_w static void posix_thread_finalize(struct posix_thread *t, void *retval) { sys_snode_t *node_l; - k_spinlock_key_t key; pthread_key_obj *key_obj; pthread_thread_data *thread_spec_data; @@ -475,11 +475,11 @@ static void posix_thread_finalize(struct posix_thread *t, void *retval) } /* move thread from run_q to done_q */ - key = k_spin_lock(&pthread_pool_lock); - sys_dlist_remove(&t->q_node); - posix_thread_q_set(t, POSIX_THREAD_DONE_Q); - t->retval = retval; - k_spin_unlock(&pthread_pool_lock, key); + SYS_SEM_LOCK(&pthread_pool_lock) { + sys_dlist_remove(&t->q_node); + posix_thread_q_set(t, POSIX_THREAD_DONE_Q); + t->retval = retval; + } /* trigger recycle work */ (void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS)); @@
-510,22 +510,22 @@ static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3) static void posix_thread_recycle(void) { - k_spinlock_key_t key; struct posix_thread *t; struct posix_thread *safe_t; sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables); - key = k_spin_lock(&pthread_pool_lock); - SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t, q_node) { - if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) { - /* thread has not been joined yet */ - continue; - } + SYS_SEM_LOCK(&pthread_pool_lock) { + SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t, + q_node) { + if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) { + /* thread has not been joined yet */ + continue; + } - sys_dlist_remove(&t->q_node); - sys_dlist_append(&recyclables, &t->q_node); + sys_dlist_remove(&t->q_node); + sys_dlist_append(&recyclables, &t->q_node); + } } - k_spin_unlock(&pthread_pool_lock, key); if (sys_dlist_is_empty(&recyclables)) { return; @@ -541,12 +541,12 @@ static void posix_thread_recycle(void) } } - key = k_spin_lock(&pthread_pool_lock); - while (!sys_dlist_is_empty(&recyclables)) { - t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node); - posix_thread_q_set(t, POSIX_THREAD_READY_Q); + SYS_SEM_LOCK(&pthread_pool_lock) { + while (!sys_dlist_is_empty(&recyclables)) { + t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node); + posix_thread_q_set(t, POSIX_THREAD_READY_Q); + } } - k_spin_unlock(&pthread_pool_lock, key); } /** @@ -571,7 +571,7 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou /* reclaim resources greedily */ posix_thread_recycle(); - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) { t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]), struct posix_thread, q_node); @@ -587,7 +587,7 @@ int pthread_create(pthread_t 
*th, const pthread_attr_t *_attr, void *(*threadrou err = pthread_barrier_init(&barrier, NULL, 2); if (err != 0) { /* cannot allocate barrier. move thread back to ready_q */ - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { sys_dlist_remove(&t->q_node); posix_thread_q_set(t, POSIX_THREAD_READY_Q); } @@ -609,7 +609,7 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou } if (err != 0) { /* cannot allocate pthread attributes (e.g. stack) */ - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { sys_dlist_remove(&t->q_node); posix_thread_q_set(t, POSIX_THREAD_READY_Q); } @@ -657,7 +657,7 @@ int pthread_getconcurrency(void) { int ret = 0; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { ret = pthread_concurrency; } @@ -674,7 +674,7 @@ int pthread_setconcurrency(int new_level) return EAGAIN; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { pthread_concurrency = new_level; } @@ -688,21 +688,21 @@ int pthread_setconcurrency(int new_level) */ int pthread_setcancelstate(int state, int *oldstate) { - int ret = 0; - struct posix_thread *t; + int ret = EINVAL; bool cancel_pending = false; - bool cancel_type = PTHREAD_CANCEL_ENABLE; + struct posix_thread *t = NULL; + int cancel_type = -1; if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) { LOG_DBG("Invalid pthread state %d", state); return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); if (t == NULL) { ret = EINVAL; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (oldstate != NULL) { @@ -712,14 +712,16 @@ int pthread_setcancelstate(int state, int *oldstate) t->attr.cancelstate = state; cancel_pending = t->attr.cancelpending; cancel_type = t->attr.canceltype; + + ret = 0; } - if (state == PTHREAD_CANCEL_ENABLE && cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS && - cancel_pending) { + if (ret == 0 && state ==
PTHREAD_CANCEL_ENABLE && + cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS && cancel_pending) { posix_thread_finalize(t, PTHREAD_CANCELED); } - return 0; + return ret; } /** @@ -729,7 +731,7 @@ int pthread_setcancelstate(int state, int *oldstate) */ int pthread_setcanceltype(int type, int *oldtype) { - int ret = 0; + int ret = EINVAL; struct posix_thread *t; if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) { @@ -737,17 +739,19 @@ int pthread_setcanceltype(int type, int *oldtype) return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); if (t == NULL) { ret = EINVAL; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (oldtype != NULL) { *oldtype = t->attr.canceltype; } t->attr.canceltype = type; + + ret = 0; } return ret; @@ -760,16 +764,16 @@ int pthread_setcanceltype(int type, int *oldtype) */ void pthread_testcancel(void) { - struct posix_thread *t; bool cancel_pended = false; + struct posix_thread *t = NULL; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); if (t == NULL) { - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) { - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (t->attr.cancelpending) { cancel_pended = true; @@ -789,24 +793,25 @@ void pthread_testcancel(void) */ int pthread_cancel(pthread_t pthread) { - int ret = 0; + int ret = ESRCH; bool cancel_state = PTHREAD_CANCEL_ENABLE; bool cancel_type = PTHREAD_CANCEL_DEFERRED; struct posix_thread *t = NULL; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (!__attr_is_initialized(&t->attr)) { /* thread has already terminated */ ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; t->attr.cancelpending = true; cancel_state = t->attr.cancelstate; cancel_type = t->attr.canceltype; @@ 
-827,7 +832,7 @@ int pthread_cancel(pthread_t pthread) */ int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param) { - int ret = 0; + int ret = ESRCH; int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO; struct posix_thread *t = NULL; @@ -836,13 +841,14 @@ int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_para return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; new_prio = posix_to_zephyr_priority(param->sched_priority, policy); } @@ -867,7 +873,6 @@ int pthread_setschedprio(pthread_t thread, int prio) struct sched_param param; ret = pthread_getschedparam(thread, &policy, ¶m); - if (ret != 0) { return ret; } @@ -876,13 +881,15 @@ int pthread_setschedprio(pthread_t thread, int prio) return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + ret = ESRCH; + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(thread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; new_prio = posix_to_zephyr_priority(prio, policy); } @@ -942,25 +949,26 @@ int pthread_attr_init(pthread_attr_t *_attr) */ int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param) { - int ret = 0; + int ret = ESRCH; struct posix_thread *t; if (policy == NULL || param == NULL) { return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (!__attr_is_initialized(&t->attr)) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; param->sched_priority = zephyr_to_posix_priority(k_thread_priority_get(&t->thread), policy); } @@ -975,7 +983,7 @@ int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *pa */ int pthread_once(pthread_once_t *once, void (*init_func)(void)) { - __unused 
int ret; + int ret = EINVAL; bool run_init_func = false; struct pthread_once *const _once = (struct pthread_once *)once; @@ -983,18 +991,19 @@ int pthread_once(pthread_once_t *once, void (*init_func)(void)) return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { if (!_once->flag) { run_init_func = true; _once->flag = true; } + ret = 0; } - if (run_init_func) { + if (ret == 0 && run_init_func) { init_func(); } - return 0; + return ret; } /** @@ -1007,10 +1016,10 @@ void pthread_exit(void *retval) { struct posix_thread *self = NULL; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { self = to_posix_thread(pthread_self()); if (self == NULL) { - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } /* Mark a thread as cancellable before exiting */ @@ -1036,7 +1045,7 @@ void pthread_exit(void *retval) */ int pthread_join(pthread_t pthread, void **status) { - int ret = 0; + int ret = ESRCH; struct posix_thread *t = NULL; if (pthread == pthread_self()) { @@ -1044,11 +1053,11 @@ int pthread_join(pthread_t pthread, void **status) return EDEADLK; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } LOG_DBG("Pthread %p joining..", &t->thread); @@ -1056,18 +1065,19 @@ int pthread_join(pthread_t pthread, void **status) if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) { /* undefined behaviour */ ret = EINVAL; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } /* * thread is joinable and is in run_q or done_q. * let's ensure that the thread cannot be joined again after this point. 
*/ + ret = 0; t->attr.detachstate = PTHREAD_CREATE_DETACHED; } @@ -1105,23 +1115,24 @@ int pthread_join(pthread_t pthread, void **status) */ int pthread_detach(pthread_t pthread) { - int ret = 0; - struct posix_thread *t; + int ret = ESRCH; + struct posix_thread *t = NULL; - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q || t->attr.detachstate != PTHREAD_CREATE_JOINABLE) { LOG_DBG("Pthread %p cannot be detached", &t->thread); ret = EINVAL; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; t->attr.detachstate = PTHREAD_CREATE_DETACHED; } @@ -1414,26 +1425,27 @@ int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(vo /* this should probably go into signal.c but we need access to the lock */ int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset) { - int ret = 0; - struct posix_thread *t; + int ret = ESRCH; + struct posix_thread *t = NULL; if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) { return EINVAL; } - K_SPINLOCK(&pthread_pool_lock) { + SYS_SEM_LOCK(&pthread_pool_lock) { t = to_posix_thread(pthread_self()); if (t == NULL) { ret = ESRCH; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } if (oset != NULL) { *oset = t->sigset; } + ret = 0; if (set == NULL) { - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } switch (how) { diff --git a/lib/posix/options/rwlock.c b/lib/posix/options/rwlock.c index f282fc26549fed..aad0a4d4713fa8 100644 --- a/lib/posix/options/rwlock.c +++ b/lib/posix/options/rwlock.c @@ -11,13 +11,14 @@ #include #include #include +#include #define CONCURRENT_READER_LIMIT (CONFIG_POSIX_THREAD_THREADS_MAX + 1) struct posix_rwlock { - struct k_sem rd_sem; - struct k_sem wr_sem; - struct k_sem reader_active; /* blocks WR till reader has acquired lock */ + struct sys_sem rd_sem; + struct sys_sem wr_sem; + struct 
sys_sem reader_active; /* blocks WR till reader has acquired lock */ k_tid_t wr_owner; }; @@ -32,7 +33,7 @@ static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout); LOG_MODULE_REGISTER(pthread_rwlock, CONFIG_PTHREAD_RWLOCK_LOG_LEVEL); -static struct k_spinlock posix_rwlock_spinlock; +static SYS_SEM_DEFINE(posix_rwlock_lock, 1, 1); static struct posix_rwlock posix_rwlock_pool[CONFIG_MAX_PTHREAD_RWLOCK_COUNT]; SYS_BITARRAY_DEFINE_STATIC(posix_rwlock_bitarray, CONFIG_MAX_PTHREAD_RWLOCK_COUNT); @@ -123,9 +124,9 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, return ENOMEM; } - k_sem_init(&rwl->rd_sem, CONCURRENT_READER_LIMIT, CONCURRENT_READER_LIMIT); - k_sem_init(&rwl->wr_sem, 1, 1); - k_sem_init(&rwl->reader_active, 1, 1); + sys_sem_init(&rwl->rd_sem, CONCURRENT_READER_LIMIT, CONCURRENT_READER_LIMIT); + sys_sem_init(&rwl->wr_sem, 1, 1); + sys_sem_init(&rwl->reader_active, 1, 1); rwl->wr_owner = NULL; LOG_DBG("Initialized rwlock %p", rwl); @@ -140,22 +141,24 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, */ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) { - int ret = 0; int err; size_t bit; + int ret = EINVAL; struct posix_rwlock *rwl; - rwl = get_posix_rwlock(*rwlock); - if (rwl == NULL) { - return EINVAL; - } + SYS_SEM_LOCK(&posix_rwlock_lock) { + rwl = get_posix_rwlock(*rwlock); + if (rwl == NULL) { + ret = EINVAL; + SYS_SEM_LOCK_BREAK; + } - K_SPINLOCK(&posix_rwlock_spinlock) { if (rwl->wr_owner != NULL) { ret = EBUSY; - K_SPINLOCK_BREAK; + SYS_SEM_LOCK_BREAK; } + ret = 0; bit = posix_rwlock_to_offset(rwl); err = sys_bitarray_free(&posix_rwlock_bitarray, 1, bit); __ASSERT_NO_MSG(err == 0); @@ -328,15 +331,15 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) if (k_current_get() == rwl->wr_owner) { /* Write unlock */ rwl->wr_owner = NULL; - k_sem_give(&rwl->reader_active); - k_sem_give(&rwl->wr_sem); + sys_sem_give(&rwl->reader_active); + sys_sem_give(&rwl->wr_sem); } else { /* Read unlock */ - k_sem_give(&rwl->rd_sem); + 
sys_sem_give(&rwl->rd_sem); - if (k_sem_count_get(&rwl->rd_sem) == CONCURRENT_READER_LIMIT) { + if (sys_sem_count_get(&rwl->rd_sem) == CONCURRENT_READER_LIMIT) { /* Last read lock, unlock writer */ - k_sem_give(&rwl->reader_active); + sys_sem_give(&rwl->reader_active); } } return 0; @@ -346,10 +349,10 @@ static uint32_t read_lock_acquire(struct posix_rwlock *rwl, int32_t timeout) { uint32_t ret = 0U; - if (k_sem_take(&rwl->wr_sem, SYS_TIMEOUT_MS(timeout)) == 0) { - k_sem_take(&rwl->reader_active, K_NO_WAIT); - k_sem_take(&rwl->rd_sem, K_NO_WAIT); - k_sem_give(&rwl->wr_sem); + if (sys_sem_take(&rwl->wr_sem, SYS_TIMEOUT_MS(timeout)) == 0) { + sys_sem_take(&rwl->reader_active, K_NO_WAIT); + sys_sem_take(&rwl->rd_sem, K_NO_WAIT); + sys_sem_give(&rwl->wr_sem); } else { ret = EBUSY; } @@ -366,7 +369,7 @@ static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout) k_timeout = SYS_TIMEOUT_MS(timeout); /* waiting for release of write lock */ - if (k_sem_take(&rwl->wr_sem, k_timeout) == 0) { + if (sys_sem_take(&rwl->wr_sem, k_timeout) == 0) { /* update remaining timeout time for 2nd sem */ if (timeout != SYS_FOREVER_MS) { elapsed_time = k_uptime_get() - st_time; @@ -377,10 +380,10 @@ static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout) k_timeout = SYS_TIMEOUT_MS(timeout); /* waiting for reader to complete operation */ - if (k_sem_take(&rwl->reader_active, k_timeout) == 0) { + if (sys_sem_take(&rwl->reader_active, k_timeout) == 0) { rwl->wr_owner = k_current_get(); } else { - k_sem_give(&rwl->wr_sem); + sys_sem_give(&rwl->wr_sem); ret = EBUSY; }