diff --git a/src/internal.rs b/src/internal.rs
index 39179d76d..7c26e133f 100644
--- a/src/internal.rs
+++ b/src/internal.rs
@@ -412,7 +412,6 @@ pub struct Rav1dFrameContext_lf {
 
 #[repr(C)]
 pub struct Rav1dFrameContext_task_thread_pending_tasks {
-    pub merge: atomic_int,
     pub lock: pthread_mutex_t,
     pub head: *mut Rav1dTask,
     pub tail: *mut Rav1dTask,
@@ -442,6 +441,7 @@ pub(crate) struct Rav1dFrameContext_task_thread {
     // [head;cur-1] when picking one for execution.
     pub task_cur_prev: *mut Rav1dTask,
     // async task insertion
+    pub pending_tasks_merge: AtomicI32,
     pub pending_tasks: Rav1dFrameContext_task_thread_pending_tasks,
 }
 
diff --git a/src/lib.rs b/src/lib.rs
index e99aab0e2..9e8727166 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -103,6 +103,7 @@ use std::mem;
 use std::mem::MaybeUninit;
 use std::process::abort;
 use std::ptr::NonNull;
+use std::sync::atomic::AtomicI32;
 use std::sync::atomic::Ordering;
 use std::sync::Arc;
 use std::sync::Once;
@@ -896,8 +897,7 @@ pub(crate) unsafe fn rav1d_flush(c: *mut Rav1dContext) {
         *fresh5 = 0 as *mut Rav1dTask;
         *&mut (*((*c).fc).offset(i_1 as isize))
             .task_thread
-            .pending_tasks
-            .merge = 0 as c_int;
+            .pending_tasks_merge = AtomicI32::new(0);
         i_1 = i_1.wrapping_add(1);
     }
     *&mut (*c).task_thread.first = 0 as c_int as c_uint;
diff --git a/src/thread_task.rs b/src/thread_task.rs
index b6793c2ff..c7341b6cb 100644
--- a/src/thread_task.rs
+++ b/src/thread_task.rs
@@ -318,22 +318,23 @@ unsafe fn add_pending(f: *mut Rav1dFrameContext, t: *mut Rav1dTask) {
         (*(*f).task_thread.pending_tasks.tail).next = t;
     }
     (*f).task_thread.pending_tasks.tail = t;
-    ::core::intrinsics::atomic_store_seqcst(&mut (*f).task_thread.pending_tasks.merge, 1 as c_int);
+    (*f).task_thread
+        .pending_tasks_merge
+        .store(1, Ordering::SeqCst);
     pthread_mutex_unlock(&mut (*f).task_thread.pending_tasks.lock);
 }
 
 #[inline]
 unsafe fn merge_pending_frame(f: *mut Rav1dFrameContext) -> c_int {
-    let merge = ::core::intrinsics::atomic_load_seqcst(&mut (*f).task_thread.pending_tasks.merge);
+    let merge = (*f).task_thread.pending_tasks_merge.load(Ordering::SeqCst);
     if merge != 0 {
         pthread_mutex_lock(&mut (*f).task_thread.pending_tasks.lock);
         let mut t: *mut Rav1dTask = (*f).task_thread.pending_tasks.head;
         (*f).task_thread.pending_tasks.head = 0 as *mut Rav1dTask;
         (*f).task_thread.pending_tasks.tail = 0 as *mut Rav1dTask;
-        ::core::intrinsics::atomic_store_seqcst(
-            &mut (*f).task_thread.pending_tasks.merge,
-            0 as c_int,
-        );
+        (*f).task_thread
+            .pending_tasks_merge
+            .store(0, Ordering::SeqCst);
         pthread_mutex_unlock(&mut (*f).task_thread.pending_tasks.lock);
         while !t.is_null() {
             let tmp: *mut Rav1dTask = (*t).next;
@@ -490,7 +491,9 @@ pub(crate) unsafe fn rav1d_task_create_tile_sbrow(
         (*(*f).task_thread.pending_tasks.tail).next = &mut *tasks.offset(0) as *mut Rav1dTask;
     }
     (*f).task_thread.pending_tasks.tail = prev_t;
-    ::core::intrinsics::atomic_store_seqcst(&mut (*f).task_thread.pending_tasks.merge, 1 as c_int);
+    (*f).task_thread
+        .pending_tasks_merge
+        .store(1, Ordering::SeqCst);
     (*f).task_thread.init_done.store(1, Ordering::SeqCst);
     pthread_mutex_unlock(&mut (*f).task_thread.pending_tasks.lock);
     Ok(())
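
Note (not part of the patch): a minimal standalone sketch of the pattern this diff applies, replacing unstable `::core::intrinsics::atomic_*_seqcst` calls on a plain integer field with a `std::sync::atomic::AtomicI32` field and its stable `load`/`store` API. The `PendingTasks`, `mark_merge`, and `take_merge` names below are illustrative only and do not come from rav1d.

    use std::sync::atomic::{AtomicI32, Ordering};

    // Hypothetical stand-in for the pending-task bookkeeping: the "merge"
    // flag is an AtomicI32 owned by safe Rust, so no raw pointer or
    // compiler intrinsic is needed to set or clear it.
    struct PendingTasks {
        merge: AtomicI32,
    }

    impl PendingTasks {
        fn new() -> Self {
            Self { merge: AtomicI32::new(0) }
        }

        // Stable equivalent of the old atomic_store_seqcst(.., 1).
        fn mark_merge(&self) {
            self.merge.store(1, Ordering::SeqCst);
        }

        // Stable equivalent of atomic_load_seqcst followed by the reset store.
        fn take_merge(&self) -> bool {
            if self.merge.load(Ordering::SeqCst) != 0 {
                self.merge.store(0, Ordering::SeqCst);
                true
            } else {
                false
            }
        }
    }

    fn main() {
        let p = PendingTasks::new();
        p.mark_merge();
        assert!(p.take_merge());
        assert!(!p.take_merge());
    }

Because `AtomicI32::store`/`load` take `&self`, the flag can be updated through a shared reference, which is why the diff can drop the `&mut` reborrows that the intrinsic calls required.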