Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Convert deprecated functions to use modern atomic variable handling #806

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/BlocksRuntime/Block_private.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ enum {
// Header shared by all __block-variable byref structures.
// `flags` carries the latched reference count plus state bits and is
// manipulated exclusively through the latching_* C11-atomic helpers,
// hence the _Atomic qualifier (replaces the old `volatile` + OSAtomic use).
struct Block_byref {
    void *isa;
    struct Block_byref *forwarding; // NOTE(review): presumably points at the live (possibly heap) copy — confirm against Blocks ABI
    _Atomic(int32_t) flags; // contains ref count
    uint32_t size;
};

Expand Down
40 changes: 14 additions & 26 deletions src/BlocksRuntime/runtime.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,20 +32,7 @@
#define __has_builtin(builtin) 0
#endif

#if __has_builtin(__sync_bool_compare_and_swap)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) \
__sync_bool_compare_and_swap(_Ptr, _Old, _New)
#else
#define _CRT_SECURE_NO_WARNINGS 1
#include <Windows.h>
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi,
int volatile *dst) {
// fixme barrier is overkill -- see objc-os.h
int original = InterlockedCompareExchange((LONG volatile *)dst, newi, oldi);
return (original == oldi);
}
#endif

#include <stdatomic.h>
/***********************
Globals
************************/
Expand All @@ -64,21 +51,22 @@ Internal Utilities
********************************************************************************/


// Atomically increment the refcount stored in *where (refcounts move in
// steps of 2; the low bits are state flags). If the count has latched at
// BLOCK_REFCOUNT_MASK it stays latched and the object is deliberately
// leaked. Returns the refcount bits after the increment, or
// BLOCK_REFCOUNT_MASK when latched.
static int32_t latching_incr_int(_Atomic(int32_t) *where) {
    while (1) {
        int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK; // latched: saturated, never changes again
        }
        // Weak CAS may fail spuriously; the enclosing loop simply retries.
        if (atomic_compare_exchange_weak(where, &old_value, old_value + 2)) {
            return old_value + 2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
static bool latching_incr_int_not_deallocating(_Atomic(int32_t) *where) {
while (1) {
int32_t old_value = *where;
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
if (old_value & BLOCK_DEALLOCATING) {
// if deallocating we can't do this
return false;
Expand All @@ -87,7 +75,7 @@ static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
// if latched, we're leaking this block, and we succeed
return true;
}
if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
if (atomic_compare_exchange_weak(where, &old_value, old_value + 2)) {
// otherwise, we must store a new retained value without the deallocating bit set
return true;
}
Expand All @@ -96,9 +84,9 @@ static bool latching_incr_int_not_deallocating(volatile int32_t *where) {


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
static bool latching_decr_int_should_deallocate(_Atomic(int32_t) *where) {
while (1) {
int32_t old_value = *where;
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
return false; // latched high
}
Expand All @@ -111,24 +99,24 @@ static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
new_value = old_value - 1;
result = true;
}
if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
if (atomic_compare_exchange_weak(where, &old_value, new_value)) {
return result;
}
}
}

// Atomically release one refcount unit (refcounts move in steps of 2).
// Returns true exactly when this decrement brings the refcount bits to
// zero (i.e. the caller should free). A count latched at
// BLOCK_REFCOUNT_MASK is left untouched, and an already-zero count
// (underflow) latches low; both return false.
static bool latching_decr_int_now_zero(_Atomic(int32_t) *where) {
    while (1) {
        int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        // Weak CAS may fail spuriously; loop retries with a fresh load.
        if (atomic_compare_exchange_weak(where, &old_value, new_value)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
Expand Down
36 changes: 19 additions & 17 deletions src/allocator.c
Original file line number Diff line number Diff line change
Expand Up @@ -542,31 +542,33 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
}
// They are all unallocated, so we could madvise the page. Try to
// take ownership of them all.
int last_locked = 0;
do {
if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
for (i = 0; i < BITMAPS_PER_PAGE; i++) {
if (!os_atomic_cmpxchg(&page_bitmaps[i], BITMAP_C(0),
BITMAP_ALL_ONES, relaxed)) {
// We didn't get one; since there is a cont allocated in
// the page, we can't madvise. Give up and unlock all.
goto unlock;
break;
}
} while (++last_locked < (signed)BITMAPS_PER_PAGE);
}

if (i >= BITMAPS_PER_PAGE) {
#if DISPATCH_DEBUG
//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
// "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
// last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
// Scribble to expose use-after-free bugs
// madvise (syscall) flushes these stores
memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
// fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
// "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
// last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
// Scribble to expose use-after-free bugs
// madvise (syscall) flushes these stores
memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
MADV_FREE));
// madvise the page
(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
MADV_FREE));
}

unlock:
while (last_locked > 1) {
page_bitmaps[--last_locked] = BITMAP_C(0);
while (i > 1) {
page_bitmaps[--i] = BITMAP_C(0);
}
if (last_locked) {
if (i) {
os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
}
return;
Expand Down
68 changes: 33 additions & 35 deletions tests/Foundation/bench.mm
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,8 @@ virtual void virtfunc(void) {
return arg;
}

static volatile int32_t global;
static volatile int64_t w_global;
static atomic_int global;
static _Atomic(int64_t) w_global;

#if TARGET_OS_EMBEDDED
static const size_t cnt = 5000000;
Expand Down Expand Up @@ -191,7 +191,7 @@ static void __attribute__((noinline))
main(void)
{
pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
OSSpinLock slock = OS_SPINLOCK_INIT;
os_unfair_lock slock = OS_UNFAIR_LOCK_INIT;
BasicObject *bo;
BasicClass *bc;
pthread_t pthr_pause;
Expand Down Expand Up @@ -219,8 +219,7 @@ static void __attribute__((noinline))
cycles_per_nanosecond = (long double)freq / (long double)NSEC_PER_SEC;

#if BENCH_SLOW
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
assert(pool);
@autoreleasepool {
#endif

/* Malloc has different logic for threaded apps. */
Expand Down Expand Up @@ -371,9 +370,7 @@ static void __attribute__((noinline))
}
print_result2(s, "\"description\" ObjC call:");

[pool release];

pool = NULL;
} // For the autorelease pool
#endif

s = mach_absolute_time();
Expand Down Expand Up @@ -554,30 +551,30 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)

s = mach_absolute_time();
for (i = cnt; i; i--) {
__sync_lock_test_and_set(&global, 0);
atomic_exchange(&global, 0);
}
print_result(s, "Atomic xchg:");

s = mach_absolute_time();
for (i = cnt; i; i--) {
__sync_val_compare_and_swap(&global, 1, 0);
int expected = 1;
atomic_compare_exchange_strong(&global, &expected, 0);
}
print_result(s, "Atomic cmpxchg:");

s = mach_absolute_time();
for (i = cnt; i; i--) {
__sync_fetch_and_add(&global, 1);
atomic_fetch_add(&global, 1);
}
print_result(s, "Atomic increment:");

{
global = 0;
volatile int32_t *g = &global;
global = 0;
atomic_int *g = &global;

s = mach_absolute_time();
for (i = cnt; i; i--) {
uint32_t result;
__sync_and_and_fetch(g, 1);
atomic_fetch_and(g, 1);
result = *g;
if (result) {
abort();
Expand All @@ -587,57 +584,58 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
}

{
global = 0;
volatile int32_t *g = &global;
global = 0;
atomic_int *g = &global;

s = mach_absolute_time();
for (i = cnt; i; i--) {
uint32_t result;
result = __sync_and_and_fetch(g, 1);
result = atomic_fetch_and(g, 1);
if (result) {
abort();
}
}
print_result(s, "Atomic and-and-fetch, using result:");
}

global = 0;
global = 0;

s = mach_absolute_time();
for (i = cnt; i; i--) {
OSAtomicIncrement32Barrier(&global);
atomic_fetch_add_explicit(&global, 1, memory_order_seq_cst);
}
print_result(s, "OSAtomicIncrement32Barrier:");
print_result(s, "atomic_fetch_add with memory_order_seq_cst barrier:");

global = 0;
global = 0;

s = mach_absolute_time();
for (i = cnt; i; i--) {
OSAtomicIncrement32(&global);
atomic_fetch_add_explicit(&global, 1, memory_order_relaxed);
}
print_result(s, "OSAtomicIncrement32:");
print_result(s, "atomic_fetch_add with memory_order_relaxed barrier:");

w_global = 0;
w_global = 0;

s = mach_absolute_time();
for (i = cnt; i; i--) {
OSAtomicIncrement64Barrier(&w_global);
atomic_fetch_add_explicit(&w_global, 1, memory_order_seq_cst);
}
print_result(s, "OSAtomicIncrement64Barrier:");
print_result(s, "64-bit atomic_fetch_add with memory_order_seq_cst barrier:");

w_global = 0;
w_global = 0;

s = mach_absolute_time();
for (i = cnt; i; i--) {
OSAtomicIncrement64(&w_global);
atomic_fetch_add_explicit(&w_global, 1, memory_order_relaxed);
}
print_result(s, "OSAtomicIncrement64:");
print_result(s, "64-bit atomic_fetch_add with memory_order_relaxed barrier:");

global = 0;
global = 0;

s = mach_absolute_time();
for (i = cnt; i; i--) {
while (!__sync_bool_compare_and_swap(&global, 0, 1)) {
int expected = 0;
while (!atomic_compare_exchange_weak(&global, &expected, 1)) {
    expected = 0;
do {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("pause");
Expand All @@ -646,16 +644,16 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
#endif
} while (global);
}
global = 0;
global = 0;
}
print_result(s, "Inlined spin lock/unlock:");

s = mach_absolute_time();
for (i = cnt; i; i--) {
OSSpinLockLock(&slock);
OSSpinLockUnlock(&slock);
os_unfair_lock_lock(&slock);
os_unfair_lock_unlock(&slock);
}
print_result(s, "OSSpinLock/Unlock:");
print_result(s, "os_unfair_lock_lock/unlock:");

s = mach_absolute_time();
for (i = cnt; i; i--) {
Expand Down
11 changes: 5 additions & 6 deletions tests/Foundation/dispatch_apply_gc.m
Original file line number Diff line number Diff line change
Expand Up @@ -30,26 +30,25 @@
#else
const size_t final = 1000, desclen = 8892;
#endif
NSAutoreleasePool *pool = nil;

// Worker body: concurrently appends `final` NSDecimalNumbers to a shared
// array under an os_unfair_lock, then checks the array's count and the
// length of its description against expected values.
// NOTE(review): os_unfair_lock requires #import <os/lock.h>; confirm the
// file's includes were updated along with this change.
static void
work(void* ctxt __attribute__((unused)))
{
	@autoreleasepool {
		NSMutableArray *a = [NSMutableArray array];
		os_unfair_lock sl = OS_UNFAIR_LOCK_INIT, *l = &sl;

		// dispatch_apply runs the block concurrently but returns only after
		// all iterations finish, so the stack lock and array stay valid.
		dispatch_apply(final, dispatch_get_global_queue(0, 0), ^(size_t i){
			NSDecimalNumber *n = [NSDecimalNumber decimalNumberWithDecimal:
					[[NSNumber numberWithInteger:i] decimalValue]];
			os_unfair_lock_lock(l);
			[a addObject:n];
			os_unfair_lock_unlock(l);
		});
		test_long("count", [a count], final);
		test_long("description length", [[a description] length], desclen);
		a = nil;
	}
	test_stop_after_delay((void*)(intptr_t)1);
}

Expand Down
4 changes: 2 additions & 2 deletions tests/Foundation/nsoperation.m
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ - (void)main
{
dispatch_test_start("NSOperation");

NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
@autoreleasepool {

NSOperationQueue *queue = [[[NSOperationQueue alloc] init] autorelease];
test_ptr_notnull("NSOperationQueue", queue);
Expand All @@ -67,7 +67,7 @@ - (void)main

[[NSRunLoop mainRunLoop] run];

[pool release];
}

return 0;
}
3 changes: 0 additions & 3 deletions tests/dispatch_after.c
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,6 @@
#endif
#include <stdlib.h>
#include <assert.h>
#ifdef __APPLE__
#include <libkern/OSAtomic.h>
#endif

#include <bsdtests.h>
#include <Block.h>
Expand Down
Loading