diff --git a/src/BlocksRuntime/runtime.c b/src/BlocksRuntime/runtime.c index 4b7d4bfa2..38ec0a7f1 100644 --- a/src/BlocksRuntime/runtime.c +++ b/src/BlocksRuntime/runtime.c @@ -32,20 +32,11 @@ #define __has_builtin(builtin) 0 #endif -#if __has_builtin(__sync_bool_compare_and_swap) -#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) \ - __sync_bool_compare_and_swap(_Ptr, _Old, _New) -#else -#define _CRT_SECURE_NO_WARNINGS 1 -#include <Windows.h> +#include <stdatomic.h> static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) { - // fixme barrier is overkill -- see objc-os.h - int original = InterlockedCompareExchange((LONG volatile *)dst, newi, oldi); - return (original == oldi); + return atomic_compare_exchange_strong((_Atomic(int)*)dst, &oldi, newi); } -#endif - /*********************** Globals ************************/ diff --git a/src/allocator.c b/src/allocator.c index 66284090f..33cd19199 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -542,31 +542,33 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) } // They are all unallocated, so we could madvise the page. Try to // take ownership of them all. - int last_locked = 0; - do { + for (i = 0; i < BITMAPS_PER_PAGE; i++) { if (!os_atomic_cmpxchg(&page_bitmaps[i], BITMAP_C(0), BITMAP_ALL_ONES, relaxed)) { // We didn't get one; since there is a cont allocated in // the page, we can't madvise. Give up and unlock all.
- goto unlock; + break; } - } while (++last_locked < (signed)BITMAPS_PER_PAGE); + } + + if (i == BITMAPS_PER_PAGE) { #if DISPATCH_DEBUG - //fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), " - // "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next, - // last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]); - // Scribble to expose use-after-free bugs - // madvise (syscall) flushes these stores - memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE); + // fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), " + // "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next, + // last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]); + // Scribble to expose use-after-free bugs + // madvise (syscall) flushes these stores + memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE); #endif - (void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE, - MADV_FREE)); + // madvise the page + (void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE, + MADV_FREE)); + } -unlock: - while (last_locked > 1) { - page_bitmaps[--last_locked] = BITMAP_C(0); + while (i > 1) { + page_bitmaps[--i] = BITMAP_C(0); } - if (last_locked) { + if (i) { os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); } return; diff --git a/tests/Foundation/bench.mm b/tests/Foundation/bench.mm index 743c3601b..e7f9f9e54 100644 --- a/tests/Foundation/bench.mm +++ b/tests/Foundation/bench.mm @@ -83,8 +83,8 @@ virtual void virtfunc(void) { return arg; } -static volatile int32_t global; -static volatile int64_t w_global; +static atomic_int global; +static _Atomic(int64_t) w_global; #if TARGET_OS_EMBEDDED static const size_t cnt = 5000000; @@ -191,7 +191,7 @@ static void __attribute__((noinline)) main(void) { pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER; - OSSpinLock slock = OS_SPINLOCK_INIT; + os_unfair_lock slock = OS_UNFAIR_LOCK_INIT; BasicObject *bo; BasicClass *bc; pthread_t pthr_pause; @@ -219,8 +219,7 @@ static void 
__attribute__((noinline)) cycles_per_nanosecond = (long double)freq / (long double)NSEC_PER_SEC; #if BENCH_SLOW - NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; - assert(pool); + @autoreleasepool { #endif /* Malloc has different logic for threaded apps. */ @@ -371,9 +370,7 @@ static void __attribute__((noinline)) } print_result2(s, "\"description\" ObjC call:"); - [pool release]; - - pool = NULL; + } // For the autorelease pool #endif s = mach_absolute_time(); @@ -554,30 +551,30 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0) s = mach_absolute_time(); for (i = cnt; i; i--) { - __sync_lock_test_and_set(&global, 0); + atomic_exchange(&global, 0); } print_result(s, "Atomic xchg:"); s = mach_absolute_time(); for (i = cnt; i; i--) { - __sync_val_compare_and_swap(&global, 1, 0); + atomic_compare_exchange_strong(&global, &(int){1}, 0); } print_result(s, "Atomic cmpxchg:"); s = mach_absolute_time(); for (i = cnt; i; i--) { - __sync_fetch_and_add(&global, 1); + atomic_fetch_add(&global, 1); } print_result(s, "Atomic increment:"); { - global = 0; - volatile int32_t *g = &global; + global = ATOMIC_VAR_INIT(0); + atomic_int *g = &global; s = mach_absolute_time(); for (i = cnt; i; i--) { uint32_t result; - __sync_and_and_fetch(g, 1); + atomic_fetch_and(g, 1); result = *g; if (result) { abort(); @@ -587,13 +584,13 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0) } { - global = 0; - volatile int32_t *g = &global; + global = ATOMIC_VAR_INIT(0); + atomic_int *g = &global; s = mach_absolute_time(); for (i = cnt; i; i--) { uint32_t result; - result = __sync_and_and_fetch(g, 1); + result = atomic_fetch_and(g, 1); if (result) { abort(); } @@ -601,43 +598,44 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0) print_result(s, "Atomic and-and-fetch, using result:"); } - global = 0; + global = ATOMIC_VAR_INIT(0); s = mach_absolute_time(); for (i = cnt; i; i--) { - OSAtomicIncrement32Barrier(&global); + __c11_atomic_fetch_add(&global, 1, memory_order_seq_cst); } - print_result(s,
"OSAtomicIncrement32Barrier:"); + print_result(s, "atomic_fetch_add with memory_order_seq_cst barrier:"); - global = 0; + global = ATOMIC_VAR_INIT(0); s = mach_absolute_time(); for (i = cnt; i; i--) { - OSAtomicIncrement32(&global); + __c11_atomic_fetch_add(&global, 1, memory_order_relaxed); } - print_result(s, "OSAtomicIncrement32:"); + print_result(s, "atomic_fetch_add with memory_order_relaxed barrier:"); - w_global = 0; + w_global = ATOMIC_VAR_INIT(0); s = mach_absolute_time(); for (i = cnt; i; i--) { - OSAtomicIncrement64Barrier(&w_global); + __c11_atomic_fetch_add(&w_global, 1, memory_order_seq_cst); } - print_result(s, "OSAtomicIncrement64Barrier:"); + print_result(s, "64-bit atomic_fetch_add with memory_order_seq_cst barrier:"); - w_global = 0; + w_global = ATOMIC_VAR_INIT(0); s = mach_absolute_time(); for (i = cnt; i; i--) { - OSAtomicIncrement64(&w_global); + __c11_atomic_fetch_add(&w_global, 1, memory_order_relaxed); } - print_result(s, "OSAtomicIncrement64:"); + print_result(s, "64-bit atomic_fetch_add with memory_order_relaxed barrier:"); - global = 0; + global = ATOMIC_VAR_INIT(0); s = mach_absolute_time(); for (i = cnt; i; i--) { - while (!__sync_bool_compare_and_swap(&global, 0, 1)) { + while (!atomic_compare_exchange_weak(&global, &(int){0}, 1)) { do { #if defined(__i386__) || defined(__x86_64__) __asm__ __volatile__ ("pause"); @@ -646,16 +644,16 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0) #endif } while (global); } - global = 0; + global = ATOMIC_VAR_INIT(0); } print_result(s, "Inlined spin lock/unlock:"); s = mach_absolute_time(); for (i = cnt; i; i--) { - OSSpinLockLock(&slock); - OSSpinLockUnlock(&slock); + os_unfair_lock_lock(&slock); + os_unfair_lock_unlock(&slock); } - print_result(s, "OSSpinLock/Unlock:"); + print_result(s, "os_unfair_lock_lock/unlock:"); s = mach_absolute_time(); for (i = cnt; i; i--) { diff --git a/tests/Foundation/dispatch_apply_gc.m b/tests/Foundation/dispatch_apply_gc.m index
6f0ac6a7a..7b877e3d9 100644 --- a/tests/Foundation/dispatch_apply_gc.m +++ b/tests/Foundation/dispatch_apply_gc.m @@ -30,26 +30,25 @@ #else const size_t final = 1000, desclen = 8892; #endif -NSAutoreleasePool *pool = nil; static void work(void* ctxt __attribute__((unused))) { - pool = [[NSAutoreleasePool alloc] init]; + @autoreleasepool { NSMutableArray *a = [NSMutableArray array]; - OSSpinLock sl = OS_SPINLOCK_INIT, *l = &sl; + os_unfair_lock sl = OS_UNFAIR_LOCK_INIT, *l = &sl; dispatch_apply(final, dispatch_get_global_queue(0, 0), ^(size_t i){ NSDecimalNumber *n = [NSDecimalNumber decimalNumberWithDecimal: [[NSNumber numberWithInteger:i] decimalValue]]; - OSSpinLockLock(l); + os_unfair_lock_lock(l); [a addObject:n]; - OSSpinLockUnlock(l); + os_unfair_lock_unlock(l); }); test_long("count", [a count], final); test_long("description length", [[a description] length], desclen); a = nil; - [pool drain]; + } test_stop_after_delay((void*)(intptr_t)1); } diff --git a/tests/Foundation/nsoperation.m b/tests/Foundation/nsoperation.m index a0421353d..220a7852b 100644 --- a/tests/Foundation/nsoperation.m +++ b/tests/Foundation/nsoperation.m @@ -54,7 +54,7 @@ - (void)main { dispatch_test_start("NSOperation"); - NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + @ autoreleasepool { NSOperationQueue *queue = [[[NSOperationQueue alloc] init] autorelease]; test_ptr_notnull("NSOperationQueue", queue); @@ -67,7 +67,7 @@ - (void)main [[NSRunLoop mainRunLoop] run]; - [pool release]; + } return 0; } diff --git a/tests/dispatch_after.c b/tests/dispatch_after.c index 2b46dc903..e0d9b2b5a 100644 --- a/tests/dispatch_after.c +++ b/tests/dispatch_after.c @@ -25,9 +25,6 @@ #endif #include #include -#ifdef __APPLE__ -#include -#endif #include #include diff --git a/tests/dispatch_apply.c b/tests/dispatch_apply.c index 1f2dfb470..39b9e2343 100644 --- a/tests/dispatch_apply.c +++ b/tests/dispatch_apply.c @@ -32,15 +32,13 @@ #endif #include #include -#ifdef __APPLE__ -#include -#endif 
+#include #include #include #include "dispatch_test.h" -static volatile int32_t busy_threads_started, busy_threads_finished; +static atomic_int busy_threads_started, busy_threads_finished; /* * Keep a thread busy, spinning on the CPU. @@ -57,7 +55,7 @@ static void busythread(void *ignored) /* prevent i and j been optimized out */ volatile uint64_t i = 0, j = 0; - OSAtomicIncrement32(&busy_threads_started); + __c11_atomic_fetch_add(&busy_threads_started, 1, memory_order_relaxed); while(!all_done) { @@ -67,7 +65,7 @@ static void busythread(void *ignored) } (void)j; - OSAtomicIncrement32(&busy_threads_finished); + __c11_atomic_fetch_add(&busy_threads_finished, 1, memory_order_relaxed); } /* @@ -103,12 +101,13 @@ static void test_apply_contended(dispatch_queue_t dq) usleep(1); } - volatile __block int32_t count = 0; + + __block atomic_int count = ATOMIC_VAR_INIT(0); const int32_t final = 32; int32_t before = busy_threads_started; dispatch_apply(final, dq, ^(size_t i __attribute__((unused))) { - OSAtomicIncrement32(&count); + __c11_atomic_fetch_add(&count, 1, memory_order_relaxed); }); int32_t after = busy_threads_finished; @@ -129,14 +128,14 @@ main(void) { dispatch_test_start("Dispatch Apply"); - volatile __block int32_t count = 0; + __block atomic_int count = ATOMIC_VAR_INIT(0); const int32_t final = 32; dispatch_queue_t queue = dispatch_get_global_queue(0, 0); test_ptr_notnull("dispatch_get_global_queue", queue); dispatch_apply(final, queue, ^(size_t i __attribute__((unused))) { - OSAtomicIncrement32(&count); + __c11_atomic_fetch_add(&count, 1, memory_order_relaxed); }); test_long("count", count, final); @@ -144,7 +143,7 @@ main(void) dispatch_apply(final, queue, ^(size_t i __attribute__((unused))) { dispatch_apply(final, queue, ^(size_t ii __attribute__((unused))) { dispatch_apply(final, queue, ^(size_t iii __attribute__((unused))) { - OSAtomicIncrement32(&count); + __c11_atomic_fetch_add(&count, 1, memory_order_relaxed); }); }); }); diff --git 
a/tests/dispatch_cascade.c b/tests/dispatch_cascade.c index fce2790cb..1835be880 100644 --- a/tests/dispatch_cascade.c +++ b/tests/dispatch_cascade.c @@ -108,7 +108,7 @@ cascade(void* context) dispatch_async_f(queues[idx], context, cascade); } - if (__sync_sub_and_fetch(&iterations, 1) == 0) { + if (atomic_fetch_sub(&iterations, 1, memory_order_relaxed) - 1 == 0) { done = 1; histogram(); dispatch_async_f(dispatch_get_main_queue(), NULL, cleanup); diff --git a/tests/dispatch_cf_main.c b/tests/dispatch_cf_main.c index 4d11cd246..f2ce2455c 100644 --- a/tests/dispatch_cf_main.c +++ b/tests/dispatch_cf_main.c @@ -21,20 +21,18 @@ #include #include #include -#ifdef __APPLE__ -#include -#endif +#include #include #include "dispatch_test.h" const int32_t final = 10; -static volatile int32_t count; +static atomic_int count = ATOMIC_VAR_INIT(0); static void work(void* ctxt __attribute__((unused))) { - int32_t c = OSAtomicIncrement32(&count); + int32_t c = __c11_atomic_fetch_add(&count, 1, memory_order_relaxed)+1; if (c < final-1) { dispatch_async_f(dispatch_get_main_queue(), NULL, work); CFRunLoopPerformBlock(CFRunLoopGetMain(), kCFRunLoopDefaultMode, ^{ diff --git a/tests/dispatch_concur.c b/tests/dispatch_concur.c index cb37b0889..84bcba75a 100644 --- a/tests/dispatch_concur.c +++ b/tests/dispatch_concur.c @@ -37,7 +37,7 @@ #include #include "dispatch_test.h" -static volatile size_t done, concur; +static _Atomic(size_t) done, concur; static int use_group_async; static uint32_t activecpu; static uint32_t min_acceptable_concurrency; @@ -57,7 +57,7 @@ static void work(void* ctxt __attribute__((unused))) { usleep(1000); - __sync_add_and_fetch(&done, 1); + atomic_fetch_add(&done, 1, memory_order_relaxed); if (!use_group_async) dispatch_group_leave(gw); } @@ -65,7 +65,7 @@ work(void* ctxt __attribute__((unused))) static void submit_work(void* ctxt) { - size_t c = __sync_add_and_fetch(&concur, 1), *m = (size_t *)ctxt, i; + size_t c =atomic_fetch_add(&concur, 1, 
memory_order_relaxed) + 1, *m = (size_t *)ctxt, i; if (c > *m) *m = c; for (i = 0; i < workers; ++i) { @@ -78,7 +78,7 @@ submit_work(void* ctxt) } usleep(10000); - __sync_sub_and_fetch(&concur, 1); + atomic_fetch_sub(&concur, 1, memory_order_relaxed); if (!use_group_async) dispatch_group_leave(g); } @@ -132,11 +132,11 @@ test_concur_async(size_t n, size_t qw) static void sync_work(void* ctxt) { - size_t c = __sync_add_and_fetch(&concur, 1), *m = (size_t *)ctxt; + size_t c = atomic_fetch_add(&concur, 1, memory_order_relaxed) + 1, *m = (size_t *)ctxt; if (c > *m) *m = c; usleep(10000); - __sync_sub_and_fetch(&concur, 1); + atomic_fetch_sub(&concur, 1, memory_order_relaxed); } static void @@ -172,11 +172,11 @@ test_concur_sync(size_t n, size_t qw) static void apply_work(void* ctxt, size_t i) { - size_t c = __sync_add_and_fetch(&concur, 1), *m = ((size_t *)ctxt) + i; + size_t c = atomic_fetch_add(&concur, 1, memory_order_relaxed) + 1, *m = ((size_t *)ctxt) + i; if (c > *m) *m = c; usleep(100000); - __sync_sub_and_fetch(&concur, 1); + atomic_fetch_sub(&concur, 1, memory_order_relaxed); } static void diff --git a/tests/dispatch_context_for_key.c b/tests/dispatch_context_for_key.c index cecf48c56..dd9c76d96 100644 --- a/tests/dispatch_context_for_key.c +++ b/tests/dispatch_context_for_key.c @@ -21,6 +21,7 @@ #include #include #include +#include <stdatomic.h> #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif @@ -33,14 +34,14 @@ static const char *ctxts[] = {"ctxt for app", "ctxt for key 1", "ctxt for key 2", "ctxt for key 1 bis", "ctxt for key 4"}; -volatile long ctxts_destroyed; +atomic_long ctxts_destroyed; static dispatch_group_t g; static void destructor(void *ctxt) { fprintf(stderr, "destructor of %s\n", (char*)ctxt); - (void)__sync_add_and_fetch(&ctxts_destroyed, 1); + atomic_fetch_add(&ctxts_destroyed, 1, memory_order_relaxed); dispatch_group_leave(g); } diff --git a/tests/dispatch_deadname.c b/tests/dispatch_deadname.c index 3c752800e..2ed9a2223
100644 --- a/tests/dispatch_deadname.c +++ b/tests/dispatch_deadname.c @@ -98,7 +98,7 @@ test_mach_debug_port(mach_port_t name, const char *str, unsigned int line) #endif static dispatch_group_t g; -static volatile long sent, received; +static atomic_long sent, received; void test_dead_name(void) @@ -202,7 +202,7 @@ test_receive_and_dead_name(void) ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, mp, 0, dispatch_get_global_queue(0, 0)); dispatch_source_set_event_handler(ds, ^{ - __sync_add_and_fetch(&received, 1); + atomic_fetch_add(&received, 1, memory_order_relaxed); usleep(100000); // rdar://problem/7676437 race with send source re-arm mach_msg_empty_rcv_t msg = { .header = { .msgh_size = sizeof(mach_msg_empty_rcv_t), @@ -263,7 +263,7 @@ send_until_timeout(mach_port_t mp) test_mach_error("mach_msg(MACH_SEND_MSG)", kr, KERN_SUCCESS); if (kr) test_stop(); } - } while (!kr && __sync_add_and_fetch(&sent, 1) < TEST_SP_MSGCOUNT); + } while (!kr && (atomic_fetch_add(&sent, 1, memory_order_relaxed) + 1) < TEST_SP_MSGCOUNT); test_mach_debug_port(mp); return kr; } @@ -275,8 +275,8 @@ test_send_possible(void) // rdar://problem/8758200 kern_return_t kr; mach_port_t mp; - sent = 0; - received = 0; + sent = ATOMIC_VAR_INIT(0); + received = ATOMIC_VAR_INIT(0); dispatch_group_enter(g); kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); test_mach_error("mach_port_allocate", kr, KERN_SUCCESS); @@ -338,7 +338,7 @@ test_send_possible(void) // rdar://problem/8758200 test_mach_error("mach_msg(MACH_RCV_MSG)", kr, KERN_SUCCESS); if (kr) test_stop(); } - } while (!kr && __sync_add_and_fetch(&received, 1)); + } while (!kr && (atomic_fetch_add(&received, 1, memory_order_relaxed) + 1)); test_mach_debug_port(mp); }); dispatch_source_set_cancel_handler(ds, ^{ @@ -390,7 +390,7 @@ static boolean_t test_mig_callback(mach_msg_header_t *message __attribute__((unused)), mach_msg_header_t *reply) { - __sync_add_and_fetch(&received, 1); + atomic_fetch_add(&received, 
1, memory_order_relaxed); reply->msgh_remote_port = 0; return false; } @@ -402,7 +402,7 @@ test_mig_server_large_msg(void) // rdar://problem/8422992 kern_return_t kr; mach_port_t mp; - received = 0; + received = ATOMIC_VAR_INIT(0); dispatch_group_enter(g); kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); test_mach_error("mach_port_allocate", kr, KERN_SUCCESS); diff --git a/tests/dispatch_group.c b/tests/dispatch_group.c index af5974225..87cc89192 100644 --- a/tests/dispatch_group.c +++ b/tests/dispatch_group.c @@ -26,9 +26,6 @@ #include #include #include -#ifdef __APPLE__ -#include -#endif #include #include #include "dispatch_test.h" diff --git a/tests/dispatch_overcommit.c b/tests/dispatch_overcommit.c index d2fca3b5c..3a535c0e4 100644 --- a/tests/dispatch_overcommit.c +++ b/tests/dispatch_overcommit.c @@ -30,14 +30,12 @@ #endif #include #include -#ifdef __APPLE__ -#include -#endif +#include #include #include "dispatch_test.h" -int32_t count = 0; +atomic_int count = ATOMIC_VAR_INIT(0); const int32_t final = 32; int @@ -56,7 +54,7 @@ main(void) dispatch_set_target_queue(queue, dispatch_get_global_queue(0, DISPATCH_QUEUE_OVERCOMMIT)); dispatch_async(queue, ^{ - OSAtomicIncrement32(&count); + __c11_atomic_fetch_add(&count, 1, memory_order_relaxed); if (count == final) { test_long("count", count, final); test_stop(); diff --git a/tests/dispatch_priority.c b/tests/dispatch_priority.c index 49cb4dc89..5af0b6972 100644 --- a/tests/dispatch_priority.c +++ b/tests/dispatch_priority.c @@ -41,7 +41,7 @@ #include #include "dispatch_test.h" -static volatile int done; +static atomic_int done; #ifdef DISPATCH_QUEUE_PRIORITY_BACKGROUND // #define USE_BACKGROUND_PRIORITY 1 @@ -75,7 +75,7 @@ static union { char padding[64]; } counts[PRIORITIES]; -static volatile long iterations; +static atomic_long iterations; static long total; static size_t prio0, priorities = PRIORITIES; @@ -143,13 +143,13 @@ cpubusy(void* context) if (done) break; } - volatile long 
*count = context; - long iterdone = __sync_sub_and_fetch(&iterations, 1); + atomic_long *count = context; + long iterdone = atomic_fetch_sub(&iterations, 1, memory_order_relaxed) - 1; if (iterdone >= 0) { - __sync_add_and_fetch(count, 1); + atomic_fetch_add(count, 1, memory_order_relaxed); if (!iterdone) { - __sync_add_and_fetch(&done, 1); + atomic_fetch_add(&done, 1, memory_order_relaxed); usleep(100000); histogram(); dispatch_time_t delay = DISPATCH_TIME_NOW; @@ -165,7 +165,7 @@ submit_work(dispatch_queue_t queue, void* context) { int i; - for (i = n_blocks(); i; --i) { + for (i = n_blocks(); i > 0; --i) { dispatch_async_f(queue, context, cpubusy); } diff --git a/tests/dispatch_proc.c b/tests/dispatch_proc.c index 0c0a18d0c..0147a13ae 100644 --- a/tests/dispatch_proc.c +++ b/tests/dispatch_proc.c @@ -27,9 +27,7 @@ #include #include #include -#ifdef __APPLE__ -#include -#endif +#include <stdatomic.h> #include #include "dispatch_test.h" diff --git a/tests/dispatch_readsync.c b/tests/dispatch_readsync.c index a59621dd1..bc1ed310f 100644 --- a/tests/dispatch_readsync.c +++ b/tests/dispatch_readsync.c @@ -48,43 +48,43 @@ #endif static dispatch_group_t g; -static volatile size_t r_count, w_count, workers, readers, writers, crw, count, drain; +static _Atomic(size_t) r_count, w_count, workers, readers, writers, crw, count, drain; static void writer(void *ctxt) { - size_t w = __sync_add_and_fetch(&writers, 1), *m = (size_t *)ctxt; + size_t w = atomic_fetch_add(&writers, 1, memory_order_relaxed) + 1, *m = (size_t *)ctxt; if (w > *m) *m = w; usleep(10000); size_t busy = BUSY; - while (busy--) if (readers) __sync_add_and_fetch(&crw, 1); + while (busy--) if (readers) atomic_fetch_add(&crw, 1, memory_order_relaxed); - if (__sync_sub_and_fetch(&w_count, 1) == 0) { + if (atomic_fetch_sub(&w_count, 1, memory_order_relaxed) - 1 == 0) { if (r_count == 0) { dispatch_async(dispatch_get_main_queue(), ^{test_stop();}); } } - __sync_sub_and_fetch(&writers, 1); + atomic_fetch_sub(&writers, 1,
memory_order_relaxed); dispatch_group_leave(g); } static void reader(void *ctxt) { - size_t r = __sync_add_and_fetch(&readers, 1), *m = (size_t *)ctxt; + size_t r = atomic_fetch_add(&readers, 1, memory_order_relaxed) + 1, *m = (size_t *)ctxt; if (r > *m) *m = r; usleep(10000); size_t busy = BUSY; - while (busy--) if (writers) __sync_add_and_fetch(&crw, 1); + while (busy--) if (writers) atomic_fetch_add(&crw, 1, memory_order_relaxed); - if (__sync_sub_and_fetch(&r_count, 1) == 0) { + if (atomic_fetch_sub(&r_count, 1, memory_order_relaxed) - 1 == 0) { if (r_count == 0) { dispatch_async(dispatch_get_main_queue(), ^{test_stop();}); } } - __sync_sub_and_fetch(&readers, 1); + atomic_fetch_sub(&readers, 1, memory_order_relaxed); } static void @@ -101,12 +101,12 @@ test_readsync(dispatch_queue_t rq, dispatch_queue_t wq, size_t n) dispatch_group_async(g, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, DISPATCH_QUEUE_OVERCOMMIT), ^{ - __sync_add_and_fetch(&workers, 1); + atomic_fetch_add(&workers, 1, memory_order_relaxed); do { usleep(100000); } while (workers < n); for (;;) { - size_t idx = __sync_add_and_fetch(&count, 1); + size_t idx = atomic_fetch_add(&count, 1, memory_order_relaxed) + 1; if (idx > LAPS) break; dispatch_sync_f(rq, mr, reader); if (!(idx % INTERVAL)) { @@ -116,10 +116,10 @@ test_readsync(dispatch_queue_t rq, dispatch_queue_t wq, size_t n) dispatch_sync_f(rq, mr, reader); if (!(idx % (INTERVAL*10))) { // Let the queue drain - __sync_add_and_fetch(&drain, 1); + atomic_fetch_add(&drain, 1, memory_order_relaxed); usleep(10000); dispatch_barrier_sync(wq, ^{}); - __sync_sub_and_fetch(&drain, 1); + atomic_fetch_sub(&drain, 1, memory_order_relaxed); } else while (drain) usleep(1000); } }); diff --git a/tests/dispatch_timer_short.c b/tests/dispatch_timer_short.c index eb948ab37..f5d5f5274 100644 --- a/tests/dispatch_timer_short.c +++ b/tests/dispatch_timer_short.c @@ -21,11 +21,11 @@ #include #include #include +#include <stdatomic.h> #include #include #ifdef __APPLE__
#include -#include <libkern/OSAtomic.h> #endif #include @@ -42,7 +42,7 @@ static dispatch_source_t t[N]; static dispatch_queue_t q; static dispatch_group_t g; -static volatile int32_t count; +static atomic_uint count; static mach_timebase_info_data_t tbi; static uint64_t start, last; @@ -52,7 +52,7 @@ static void test_fin(void *cxt) { - uint32_t finalCount = (uint32_t)count; + unsigned int finalCount = count; fprintf(stderr, "Called back every %llu us on average\n", (delay/finalCount)/NSEC_PER_USEC); test_long_less_than("Frequency", 1, @@ -110,7 +110,7 @@ test_short_timer(void) fprintf(stderr, "First timer callback (after %4llu ms)\n", elapsed_ms(start)); } - OSAtomicIncrement32(&count); + __c11_atomic_fetch_add(&count, 1, memory_order_relaxed); if (elapsed_ms(last) >= 100) { fprintf(stderr, "%5d timer callbacks (after %4llu ms)\n", count, elapsed_ms(start)); diff --git a/tests/dispatch_vm.c b/tests/dispatch_vm.c index 2b1b9d6f4..270a7561c 100644 --- a/tests/dispatch_vm.c +++ b/tests/dispatch_vm.c @@ -24,9 +24,7 @@ #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif -#ifdef __APPLE__ -#include <libkern/OSAtomic.h> -#endif +#include <stdatomic.h> #include #ifdef __ANDROID__ #include @@ -61,8 +59,8 @@ #endif static char **pages; -static volatile int32_t handler_call_count; -static volatile int32_t page_count; +static atomic_int handler_call_count; +static atomic_int page_count; static int32_t max_page_count; static dispatch_source_t vm_source; static dispatch_queue_t vm_queue; @@ -140,7 +138,7 @@ main(void) test_skip("Memory pressure at start of test"); cleanup(); } - if (OSAtomicIncrement32Barrier(&handler_call_count) != NOTIFICATIONS) { + if (__c11_atomic_fetch_add(&handler_call_count, 1, memory_order_seq_cst) + 1 != NOTIFICATIONS) { log_msg("Ignoring vm pressure notification\n"); interval = 1; return; @@ -165,7 +163,7 @@ main(void) } bzero(p, ALLOC_SIZE); pages[page_count] = p; - if (!(OSAtomicIncrement32Barrier(&page_count) % interval)) { + if (!((__c11_atomic_fetch_add(&page_count, 1, memory_order_seq_cst) + 1) % interval)) { log_msg("Allocated %ldMB\n", pg2mb(page_count)); usleep(200000); } diff --git a/tests/generic_unix_port.h b/tests/generic_unix_port.h index f2c82f1ca..e6fc9a2bd 100644 --- a/tests/generic_unix_port.h +++ b/tests/generic_unix_port.h @@ -1,24 +1,6 @@ #include #include -static inline int32_t -OSAtomicIncrement32(volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, 1, __ATOMIC_RELAXED)+1; -} - -static inline int32_t -OSAtomicIncrement32Barrier(volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, 1, __ATOMIC_SEQ_CST)+1; -} - -static inline int32_t -OSAtomicAdd32(int32_t val, volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, val, __ATOMIC_RELAXED)+val; -} - // Simulation of mach_absolute_time related infrastructure // For now, use gettimeofday. // Consider using clockgettime(CLOCK_MONOTONIC) instead. diff --git a/tests/generic_win_port.h b/tests/generic_win_port.h index d693c7453..fcefa4fd8 100644 --- a/tests/generic_win_port.h +++ b/tests/generic_win_port.h @@ -23,24 +23,6 @@ struct mach_timebase_info { typedef struct mach_timebase_info *mach_timebase_info_t; typedef struct mach_timebase_info mach_timebase_info_data_t; -static inline int32_t -OSAtomicIncrement32(volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, 1, __ATOMIC_RELAXED)+1; -} - -static inline int32_t -OSAtomicIncrement32Barrier(volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, 1, __ATOMIC_SEQ_CST)+1; -} - -static inline int32_t -OSAtomicAdd32(int32_t val, volatile int32_t *var) -{ - return __c11_atomic_fetch_add((_Atomic(int)*)var, val, __ATOMIC_RELAXED)+val; -} - WCHAR * argv_to_command_line(char **argv);