Mirror of https://github.com/yuzu-emu/unicorn.git (synced 2025-08-05)
atomic: introduce smp_mb_acquire and smp_mb_release
commit fd7ef4c184
parent b41bb81737
@@ -38,16 +38,16 @@ void _ReadWriteBarrier(void);
  * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
  */
 
-#define smp_mb()    ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
-#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
-#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+#define smp_mb()           ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
+#define smp_mb_release()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
+#define smp_mb_acquire()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
 
 /* Most compilers currently treat consume and acquire the same, but really
  * no processors except Alpha need a barrier here.  Leave it in if
  * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
  */
 #if defined(__SANITIZE_THREAD__)
 #define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
 #elif defined(__alpha__)
 #define smp_read_barrier_depends()   asm volatile("mb":::"memory")
 #else
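The point of the split: a release fence before a relaxed store, paired with
an acquire fence after a relaxed load, gives the usual message-passing
guarantee without paying for a full smp_mb() on either side. A minimal
standalone sketch (not part of the commit; the macro bodies are copied from
the __ATOMIC_RELAXED branch above, and the pthread harness is illustrative):

#include <pthread.h>
#include <stdio.h>

#define barrier()          asm volatile("" ::: "memory")
#define smp_mb_release()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

static int payload;
static int flag;

static void *producer(void *arg)
{
    payload = 42;                                  /* plain write */
    smp_mb_release();                              /* payload ordered before flag */
    __atomic_store_n(&flag, 1, __ATOMIC_RELAXED);  /* publish */
    return NULL;
}

static void *consumer(void *arg)
{
    while (!__atomic_load_n(&flag, __ATOMIC_RELAXED)) {
        /* spin until the producer publishes */
    }
    smp_mb_acquire();                              /* flag ordered before payload */
    printf("payload = %d\n", payload);             /* always prints 42 */
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}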
@@ -114,13 +114,13 @@ void _ReadWriteBarrier(void);
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
     typeof(*ptr) _val;                            \
     __atomic_load(ptr, &_val, __ATOMIC_RELAXED);  \
-    smp_rmb();                                    \
+    smp_mb_acquire();                             \
     _val;                                         \
 })
 
 #define atomic_mb_set(ptr, i) do {                \
     QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
-    smp_wmb();                                    \
+    smp_mb_release();                             \
     __atomic_store_n(ptr, i, __ATOMIC_RELAXED);   \
     smp_mb();                                     \
 } while(0)
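In <stdatomic.h> terms the patched pair reads roughly as below (a sketch
for a single int variable, not QEMU code): a relaxed load followed by an
acquire fence is at least as strong as an acquire load, and the release
fence / relaxed store / full fence sequence approximates a sequentially
consistent store.

#include <stdatomic.h>

static _Atomic int v;

static inline int mb_read(void)
{
    int val = atomic_load_explicit(&v, memory_order_relaxed);
    atomic_thread_fence(memory_order_acquire);    /* smp_mb_acquire() */
    return val;
}

static inline void mb_set(int i)
{
    atomic_thread_fence(memory_order_release);    /* smp_mb_release() */
    atomic_store_explicit(&v, i, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);    /* smp_mb() */
}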
@@ -212,8 +212,8 @@ void _ReadWriteBarrier(void);
  * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
  * qemu memory or non-temporal load/stores from C code.
  */
-#define smp_wmb()   barrier()
-#define smp_rmb()   barrier()
+#define smp_mb_release()   barrier()
+#define smp_mb_acquire()   barrier()
 
 /*
  * __sync_lock_test_and_set() is documented to be an acquire barrier only,
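This is the x86 branch: under TSO, loads are not reordered with other loads
and stores are not reordered with other stores, so acquire and release
ordering only need to constrain the compiler; only the full smp_mb() needs
a real fence instruction. A compile-only sketch of the idiom (assuming
GCC/Clang; not from the commit):

#define barrier()          asm volatile("" ::: "memory")  /* compiler-only */
#define smp_mb_release()   barrier()   /* no CPU fence needed under TSO */
#define smp_mb_acquire()   barrier()   /* no CPU fence needed under TSO */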
@@ -237,13 +237,15 @@ void _ReadWriteBarrier(void);
  * smp_mb has the same problem as on x86 for not-very-new GCC
  * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
  */
 #define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
 #if defined(__powerpc64__)
-#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
+#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
+#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
 #else
-#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
+#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
+#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
 #endif
 #define smp_mb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
 
 #endif /* _ARCH_PPC */
 
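Why ppc64 can use lwsync for both directions (a sketch of the reasoning,
not from the commit): lwsync orders load-load, load-store and store-store,
i.e. everything except store-load, and neither an acquire nor a release
barrier ever needs store-load ordering, so the cheaper instruction
suffices. Compile-only illustration (assumes GCC targeting ppc64):

#if defined(__powerpc64__)
/* cheap: sufficient for both acquire and release */
#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
/* expensive: the only barrier that also orders store-load */
#define smp_mb()           ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif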
@@ -251,18 +253,18 @@ void _ReadWriteBarrier(void);
  * For (host) platforms we don't have explicit barrier definitions
  * for, we use the gcc __sync_synchronize() primitive to generate a
  * full barrier.  This should be safe on all platforms, though it may
- * be overkill for smp_wmb() and smp_rmb().
+ * be overkill for smp_mb_acquire() and smp_mb_release().
  */
 #ifndef smp_mb
 #define smp_mb()   __sync_synchronize()
 #endif
 
 #ifndef smp_wmb
 #define smp_wmb()   __sync_synchronize()
 #endif
 
 #ifndef smp_rmb
 #define smp_rmb()   __sync_synchronize()
 #endif
 
 #ifndef smp_read_barrier_depends
@@ -339,12 +341,12 @@ void _ReadWriteBarrier(void);
  */
 #define atomic_mb_read(ptr)    ({         \
     typeof(*ptr) _val = atomic_read(ptr); \
-    smp_rmb();                            \
+    smp_mb_acquire();                     \
     _val;                                 \
 })
 
 #define atomic_mb_set(ptr, i)  do {       \
-    smp_wmb();                            \
+    smp_mb_release();                     \
     atomic_set(ptr, i);                   \
     smp_mb();                             \
 } while (0)
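For context: in this pre-C11 branch of the header, atomic_read() and
atomic_set() are plain volatile accesses, so the explicit barriers around
them do all of the ordering work. A sketch of the usual volatile-cast
idiom (the exact definitions in this tree may differ):

#define atomic_read(ptr)    (*(__typeof__(*ptr) volatile *)(ptr))
#define atomic_set(ptr, i)  ((*(__typeof__(*ptr) volatile *)(ptr)) = (i))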
@@ -392,4 +394,12 @@ void _ReadWriteBarrier(void);
 #endif
 
 #endif /* __ATOMIC_RELAXED */
+
+#ifndef smp_wmb
+#define smp_wmb()   smp_mb_release()
+#endif
+#ifndef smp_rmb
+#define smp_rmb()   smp_mb_acquire()
+#endif
+
 #endif /* QEMU_ATOMIC_H */
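The final fallbacks keep the old names alive: any branch that no longer
defines smp_wmb()/smp_rmb() directly (like the __ATOMIC_RELAXED branch
above) now resolves them to the new release/acquire fences, so existing
call sites compile unchanged. A sketch of a typical legacy call site (the
ring-buffer names are illustrative, not from the commit):

#define barrier()          asm volatile("" ::: "memory")
#define smp_mb_release()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif

struct ring { int data[16]; unsigned head; };

static void publish(struct ring *r, unsigned idx, int value)
{
    r->data[idx % 16] = value;   /* fill the slot */
    smp_wmb();                   /* now a release fence on C11 builds */
    __atomic_store_n(&r->head, idx + 1, __ATOMIC_RELAXED);
}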