Mirror of https://github.com/yuzu-emu/unicorn.git
include/qemu/atomic.h: default to __atomic functions
The __atomic primitives have been available since GCC 4.7 and provide a richer interface for describing memory ordering requirements. As a bonus, using the primitives instead of hand-rolled functions lets us use tools such as ThreadSanitizer, which needs well-defined APIs for its analysis.

If we have the __ATOMIC defines we use the __atomic primitives exclusively for all our atomic accesses. Otherwise we fall back to the mixture of __sync and hand-rolled barrier cases.

Backports commit a0aa44b488b3601415d55041e4619aef5f3a4ba8 from qemu
This commit is contained in:
parent 4e7259a49b
commit 171d267209
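To make the commit message concrete, here is a minimal sketch (not part of the patch; the functions and variables are invented) of the difference it describes: the __sync builtins bake in a full barrier and leave surrounding plain accesses invisible to tooling, while the __atomic builtins take an explicit memory-order argument that tools like ThreadSanitizer can reason about.

    #include <stdbool.h>

    int counter;
    bool ready;

    /* Old style: every __sync op is a full barrier; ordering is implicit. */
    static void publish_sync(void)
    {
        __sync_fetch_and_add(&counter, 1);
        __sync_synchronize();          /* hand-rolled full barrier */
        ready = true;                  /* plain store: opaque to tsan */
    }

    /* New style: the ordering is part of the call, and every access is
     * made through a well-defined API that tsan can instrument. */
    static void publish_atomic(void)
    {
        __atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
        __atomic_store_n(&ready, true, __ATOMIC_RELEASE);
    }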
@@ -8,6 +8,8 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  *
+ * See docs/atomics.txt for discussion about the guarantees each
+ * atomic primitive is meant to provide.
  */
 
 #ifndef __QEMU_ATOMIC_H
@@ -15,8 +17,6 @@
 
 #include "qemu/compiler.h"
 
-/* For C11 atomic ops */
-
 /* Compiler barrier */
 #ifdef _MSC_VER
 void _ReadWriteBarrier(void);
@@ -26,7 +26,126 @@ void _ReadWriteBarrier(void);
 #define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
 #endif
 
-#ifndef __ATOMIC_RELAXED
+#ifdef __ATOMIC_RELAXED
+/* For C11 atomic ops */
+
+/* Manual memory barriers
+ *
+ * __atomic_thread_fence does not include a compiler barrier; instead,
+ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+ * semantics. If smp_wmb() is a no-op, absence of the barrier means that
+ * the compiler is free to reorder stores on each side of the barrier.
+ * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
+ */
+
+#define smp_mb()    ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
+#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
+#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+
+#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
+
+/* Weak atomic operations prevent the compiler moving other
+ * loads/stores past the atomic operation load/store. However there is
+ * no explicit memory barrier for the processor.
+ */
+#define atomic_read(ptr)                              \
+    ({                                                \
+    typeof(*ptr) _val;                                \
+    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);      \
+    _val;                                             \
+    })
+
+#define atomic_set(ptr, i)  do {                      \
+    typeof(*ptr) _val = (i);                          \
+    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
+} while(0)
+
+/* Atomic RCU operations imply weak memory barriers */
+
+#define atomic_rcu_read(ptr)                          \
+    ({                                                \
+    typeof(*ptr) _val;                                \
+    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);      \
+    _val;                                             \
+    })
+
+#define atomic_rcu_set(ptr, i)  do {                  \
+    typeof(*ptr) _val = (i);                          \
+    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);     \
+} while(0)
+
+/* atomic_mb_read/set semantics map to Java volatile variables. They are
+ * less expensive on some platforms (notably POWER & ARMv7) than fully
+ * sequentially consistent operations.
+ *
+ * As long as they are used as paired operations they are safe to
+ * use. See docs/atomics.txt for more discussion.
+ */
+
+#if defined(_ARCH_PPC)
+#define atomic_mb_read(ptr)                           \
+    ({                                                \
+    typeof(*ptr) _val;                                \
+    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);      \
+    smp_rmb();                                        \
+    _val;                                             \
+    })
+
+#define atomic_mb_set(ptr, i)  do {                   \
+    typeof(*ptr) _val = (i);                          \
+    smp_wmb();                                        \
+    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
+    smp_mb();                                         \
+} while(0)
+#else
+#define atomic_mb_read(ptr)                           \
+    ({                                                \
+    typeof(*ptr) _val;                                \
+    __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);      \
+    _val;                                             \
+    })
+
+#define atomic_mb_set(ptr, i)  do {                   \
+    typeof(*ptr) _val = (i);                          \
+    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST);     \
+} while(0)
+#endif
+
+
+/* All the remaining operations are fully sequentially consistent */
+
+#define atomic_xchg(ptr, i)    ({                           \
+    typeof(*ptr) _new = (i), _old;                          \
+    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
+    _old;                                                   \
+    })
+
+/* Returns the eventual value, failed or not */
+#define atomic_cmpxchg(ptr, old, new)                               \
+    ({                                                              \
+    typeof(*ptr) _old = (old), _new = (new);                        \
+    __atomic_compare_exchange(ptr, &_old, &_new, false,             \
+                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);  \
+    _old;                                                           \
+    })
+
+/* Provide shorter names for GCC atomic builtins, return old value */
+#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+
+/* And even shorter names that return void. */
+#define atomic_inc(ptr)        ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_dec(ptr)        ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_add(ptr, n)     ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_sub(ptr, n)     ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_and(ptr, n)     ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_or(ptr, n)      ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+
+#else /* __ATOMIC_RELAXED */
+
 /*
  * We use GCC builtin if it's available, as that can use mfence on
@@ -101,8 +220,6 @@ void _ReadWriteBarrier(void);
 
 #endif /* _ARCH_PPC */
 
-#endif /* C11 atomics */
-
 /*
  * For (host) platforms we don't have explicit barrier definitions
  * for, we use the gcc __sync_synchronize() primitive to generate a
@@ -114,42 +231,62 @@ void _ReadWriteBarrier(void);
 #endif
 
 #ifndef smp_wmb
-#ifdef __ATOMIC_RELEASE
-/* __atomic_thread_fence does not include a compiler barrier; instead,
- * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
- * semantics. If smp_wmb() is a no-op, absence of the barrier means that
- * the compiler is free to reorder stores on each side of the barrier.
- * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
- */
-#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
-#else
 #define smp_wmb()   __sync_synchronize()
 #endif
-#endif
 
 #ifndef smp_rmb
-#ifdef __ATOMIC_ACQUIRE
-#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
-#else
 #define smp_rmb()   __sync_synchronize()
 #endif
-#endif
 
 #ifndef smp_read_barrier_depends
-#ifdef __ATOMIC_CONSUME
-#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
-#else
 #define smp_read_barrier_depends()   barrier()
 #endif
-#endif
 
-#ifndef atomic_read
+/* These will only be atomic if the processor does the fetch or store
+ * in a single issue memory operation
+ */
 #define atomic_read(ptr)       (*(__typeof__(*ptr) volatile*) (ptr))
-#endif
-
-#ifndef atomic_set
 #define atomic_set(ptr, i)     ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
-#endif
+
+/**
+ * atomic_rcu_read - reads a RCU-protected pointer to a local variable
+ * into a RCU read-side critical section. The pointer can later be safely
+ * dereferenced within the critical section.
+ *
+ * This ensures that the pointer copy is invariant throughout the whole
+ * critical section.
+ *
+ * Inserts memory barriers on architectures that require them (currently only
+ * Alpha) and documents which pointers are protected by RCU.
+ *
+ * atomic_rcu_read also includes a compiler barrier to ensure that
+ * value-speculative optimizations (e.g. VSS: Value Speculation
+ * Scheduling) does not perform the data read before the pointer read
+ * by speculating the value of the pointer.
+ *
+ * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
+ */
+#define atomic_rcu_read(ptr)    ({                \
+    typeof(*ptr) _val = atomic_read(ptr);         \
+    smp_read_barrier_depends();                   \
+    _val;                                         \
+})
+
+/**
+ * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
+ * meant to be read by RCU read-side critical sections.
+ *
+ * Documents which pointers will be dereferenced by RCU read-side critical
+ * sections and adds the required memory barriers on architectures requiring
+ * them. It also makes sure the compiler does not reorder code initializing the
+ * data structure before its publication.
+ *
+ * Should match atomic_rcu_read().
+ */
+#define atomic_rcu_set(ptr, i)  do {              \
+    smp_wmb();                                    \
+    atomic_set(ptr, i);                           \
+} while (0)
 
 /* These have the same semantics as Java volatile variables.
  * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
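The atomic_rcu_read()/atomic_rcu_set() doc comments above describe a publish/read discipline: initialize the structure first, publish the pointer second, and dereference only a local copy on the read side. A small illustrative sketch of that usage, again assuming this header is included and with invented names:

    #include "qemu/atomic.h"
    #include <stddef.h>

    struct config {
        int timeout;
    };

    static struct config *current_config;

    static void writer(struct config *newc)
    {
        newc->timeout = 100;                    /* initialize first... */
        atomic_rcu_set(&current_config, newc);  /* ...then publish */
    }

    static int reader(void)
    {
        /* one read; c stays invariant for the rest of the critical section */
        struct config *c = atomic_rcu_read(&current_config);
        return c ? c->timeout : 0;
    }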
@@ -173,31 +310,21 @@ void _ReadWriteBarrier(void);
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
-#ifndef atomic_mb_read
 #define atomic_mb_read(ptr)    ({              \
     typeof(*ptr) _val = atomic_read(ptr);      \
     smp_rmb();                                 \
     _val;                                      \
 })
-#endif
-
-#ifndef atomic_mb_set
 #define atomic_mb_set(ptr, i)  do {            \
     smp_wmb();                                 \
     atomic_set(ptr, i);                        \
     smp_mb();                                  \
 } while (0)
-#endif
 
 #ifndef atomic_xchg
 #if defined(__clang__)
 #define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
-#elif defined(__ATOMIC_SEQ_CST)
-#define atomic_xchg(ptr, i)    ({                           \
-    typeof(*ptr) _new = (i), _old;                          \
-    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
-    _old;                                                   \
-    })
 #else
 /* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
 #define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
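Because atomic_cmpxchg() "returns the eventual value, failed or not" (on both the __atomic and __sync paths), a retry loop can feed the returned value straight into its next attempt without a separate reload. An illustrative sketch, not from the tree:

    #include "qemu/atomic.h"

    /* Atomically increment *ptr without exceeding max. */
    static int saturating_inc(int *ptr, int max)
    {
        int old = atomic_read(ptr);
        for (;;) {
            int next = old < max ? old + 1 : max;
            int seen = atomic_cmpxchg(ptr, old, next);
            if (seen == old) {
                return next;   /* exchange succeeded */
            }
            old = seen;        /* lost a race: retry with the fresh value */
        }
    }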
@@ -237,4 +364,5 @@ void _ReadWriteBarrier(void);
 #define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
 #endif
 
-#endif
+#endif /* __ATOMIC_RELAXED */
+#endif /* __QEMU_ATOMIC_H */
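Finally, to see the ThreadSanitizer benefit the commit message mentions, a standalone test (an editor's sketch, not part of the patch) can be built with -fsanitize=thread on a GCC or Clang toolchain that supports it, e.g. gcc -fsanitize=thread -g -O1 test.c -o test. tsan reports the plain access below but accepts the one made through the well-defined builtin:

    #include <pthread.h>
    #include <stdio.h>

    static int racy;
    static int sane;

    static void *worker(void *arg)
    {
        racy++;                                             /* data race: reported */
        __atomic_fetch_add(&sane, 1, __ATOMIC_SEQ_CST);     /* well-defined: clean */
        return arg;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("racy=%d sane=%d\n", racy, sane);
        return 0;
    }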