qemu/atomic.h: rename atomic_ to qatomic_

clang's C11 atomic_fetch_*() functions only accept a pointer to a C11
_Atomic type. QEMU uses plain types (int, etc.) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:

$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
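
For illustration, a minimal standalone sketch of the clash (hypothetical
file, not part of this patch): C11 atomic_fetch_add() from <stdatomic.h>
insists on an _Atomic object, while the __atomic builtins that QEMU's
macros wrap accept plain types, which is why the q-prefixed names can
coexist with the system header:

    #include <stdatomic.h>

    static unsigned int counter;   /* plain type, as QEMU uses */

    void bump(void)
    {
        /* atomic_fetch_add(&counter, 1);
         *   -> error: address argument to atomic operation must be a
         *      pointer to _Atomic type ('unsigned int *' invalid) */

        /* The builtin behind qatomic_fetch_add() takes plain pointers,
         * so it compiles even with <stdatomic.h> in scope. */
        __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
    }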

Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.

This patch was generated using:

$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
      sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
      sed -i "s%\<$identifier\>%q$identifier%g" \
          $(git grep -I -l "\<$identifier\>")
  done

I manually fixed line-wrap issues and misaligned rST tables.

Backports d73415a315471ac0b127ed3fad45c8ec5d711de1
Stefan Hajnoczi 2021-03-08 14:34:24 -05:00 committed by Lioncash
parent 1341de97f0
commit 320b59ddb9
27 changed files with 228 additions and 252 deletions

View file

@ -68,9 +68,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret; DATA_TYPE ret;
#if DATA_SIZE == 16 #if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv); ret = qatomic16_cmpxchg(haddr, cmpv, newv);
#else #else
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv); ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif #endif
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
return ret; return ret;
@ -81,7 +81,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{ {
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
val = atomic16_read(haddr); val = qatomic16_read(haddr);
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
return val; return val;
} }
@ -90,7 +90,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS) ABI_TYPE val EXTRA_ARGS)
{ {
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
atomic16_set(haddr, val); qatomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
} }
#endif #endif
@ -99,7 +99,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS) ABI_TYPE val EXTRA_ARGS)
{ {
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret = atomic_xchg__nocheck(haddr, val); DATA_TYPE ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
return ret; return ret;
} }
@ -109,7 +109,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \ ABI_TYPE val EXTRA_ARGS) \
{ \ { \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret = atomic_##X(haddr, val); \ DATA_TYPE ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
return ret; \ return ret; \
} }
@ -136,10 +136,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE cmp, old, new, val = xval; \ XDATA_TYPE cmp, old, new, val = xval; \
smp_mb(); \ smp_mb(); \
cmp = atomic_read__nocheck(haddr); \ cmp = qatomic_read__nocheck(haddr); \
do { \ do { \
old = cmp; new = FN(old, val); \ old = cmp; new = FN(old, val); \
cmp = atomic_cmpxchg__nocheck(haddr, old, new); \ cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
} while (cmp != old); \ } while (cmp != old); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
return RET; \ return RET; \
@ -177,9 +177,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
DATA_TYPE ret; DATA_TYPE ret;
#if DATA_SIZE == 16 #if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); ret = qatomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else #else
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif #endif
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
@ -191,7 +191,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{ {
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
val = atomic16_read(haddr); val = qatomic16_read(haddr);
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
return BSWAP(val); return BSWAP(val);
} }
@ -201,7 +201,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{ {
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
val = BSWAP(val); val = BSWAP(val);
atomic16_set(haddr, val); qatomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
} }
#endif #endif
@ -210,7 +210,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS) ABI_TYPE val EXTRA_ARGS)
{ {
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val)); ABI_TYPE ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP; ATOMIC_MMU_CLEANUP;
return BSWAP(ret); return BSWAP(ret);
} }
@ -220,7 +220,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \ ABI_TYPE val EXTRA_ARGS) \
{ \ { \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret = atomic_##X(haddr, BSWAP(val)); \ DATA_TYPE ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
return BSWAP(ret); \ return BSWAP(ret); \
} }
@ -245,10 +245,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE ldo, ldn, old, new, val = xval; \ XDATA_TYPE ldo, ldn, old, new, val = xval; \
smp_mb(); \ smp_mb(); \
ldn = atomic_read__nocheck(haddr); \ ldn = qatomic_read__nocheck(haddr); \
do { \ do { \
ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \ ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
} while (ldo != ldn); \ } while (ldo != ldn); \
ATOMIC_MMU_CLEANUP; \ ATOMIC_MMU_CLEANUP; \
return RET; \ return RET; \

View file

@ -81,7 +81,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
/* We were asked to stop executing TBs (probably a pending /* We were asked to stop executing TBs (probably a pending
* interrupt. We've now stopped, so clear the flag. * interrupt. We've now stopped, so clear the flag.
*/ */
atomic_set(&cpu->tcg_exit_req, 0); qatomic_set(&cpu->tcg_exit_req, 0);
} }
return ret; return ret;
@ -234,7 +234,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
mmap_unlock(); mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */ /* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
} }
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in /* We don't take care of direct jumps when address mapping changes in
@ -365,7 +365,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
{ {
CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu);
if (unlikely(atomic_read(&cpu->interrupt_request))) { if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request = cpu->interrupt_request; int interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */ /* Mask out external interrupts for this step. */
@ -420,8 +420,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
} }
/* Finally, check if we need to exit to the main loop. */ /* Finally, check if we need to exit to the main loop. */
if (unlikely(atomic_read(&cpu->exit_request))) { if (unlikely(qatomic_read(&cpu->exit_request))) {
atomic_set(&cpu->exit_request, 0); qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) { if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT; cpu->exception_index = EXCP_INTERRUPT;
} }
@ -459,7 +459,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
abort(); abort();
#else #else
int insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32); int insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
*last_tb = NULL; *last_tb = NULL;
if (cpu->icount_extra && insns_left >= 0) { if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */ /* Refill decrementer and continue execution. */
@ -539,8 +539,8 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
return EXCP_HALTED; return EXCP_HALTED;
} }
atomic_mb_set(&uc->current_cpu, cpu); qatomic_mb_set(&uc->current_cpu, cpu);
atomic_mb_set(&uc->tcg_current_rr_cpu, cpu); qatomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
if (cc->tcg_ops.cpu_exec_enter) { if (cc->tcg_ops.cpu_exec_enter) {
cc->tcg_ops.cpu_exec_enter(cpu); cc->tcg_ops.cpu_exec_enter(cpu);

View file

@ -241,8 +241,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
#if TCG_OVERSIZED_GUEST #if TCG_OVERSIZED_GUEST
tlb_entry->addr_write |= TLB_NOTDIRTY; tlb_entry->addr_write |= TLB_NOTDIRTY;
#else #else
atomic_set(&tlb_entry->addr_write, qatomic_set(&tlb_entry->addr_write,
tlb_entry->addr_write | TLB_NOTDIRTY); tlb_entry->addr_write | TLB_NOTDIRTY);
#endif #endif
} }
} }
@ -514,8 +514,8 @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
#if TCG_OVERSIZED_GUEST #if TCG_OVERSIZED_GUEST
return *(target_ulong *)((uintptr_t)entry + ofs); return *(target_ulong *)((uintptr_t)entry + ofs);
#else #else
/* ofs might correspond to .addr_write, so use atomic_read */ /* ofs might correspond to .addr_write, so use qatomic_read */
return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif #endif
} }
@ -528,7 +528,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
/* elt_ofs might correspond to .addr_write, so use atomic_read */ /* elt_ofs might correspond to .addr_write, so use qatomic_read */
target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs); target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) { if (cmp == page) {

View file

@ -494,26 +494,26 @@ static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int
/* Level 2..N-1. */ /* Level 2..N-1. */
for (i = uc->v_l2_levels; i > 0; i--) { for (i = uc->v_l2_levels; i > 0; i--) {
void **p = atomic_read(lp); void **p = qatomic_read(lp);
if (p == NULL) { if (p == NULL) {
if (!alloc) { if (!alloc) {
return NULL; return NULL;
} }
p = g_new0(void *, V_L2_SIZE); p = g_new0(void *, V_L2_SIZE);
atomic_set(lp, p); qatomic_set(lp, p);
} }
lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
} }
pd = atomic_read(lp); pd = qatomic_read(lp);
if (pd == NULL) { if (pd == NULL) {
if (!alloc) { if (!alloc) {
return NULL; return NULL;
} }
pd = g_new0(PageDesc, V_L2_SIZE); pd = g_new0(PageDesc, V_L2_SIZE);
atomic_set(lp, pd); qatomic_set(lp, pd);
} }
return pd + (index & (V_L2_SIZE - 1)); return pd + (index & (V_L2_SIZE - 1));
@ -978,7 +978,7 @@ void tb_flush(CPUState *cpu)
} }
cpu_tb_jmp_cache_clear(cpu); cpu_tb_jmp_cache_clear(cpu);
atomic_mb_set(&cpu->tb_flushed, true); qatomic_mb_set(&cpu->tb_flushed, true);
tcg_ctx->tb_ctx.nb_tbs = 0; tcg_ctx->tb_ctx.nb_tbs = 0;
memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash)); memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash));
@ -1146,7 +1146,7 @@ void tb_phys_invalidate(struct uc_struct *uc,
uint32_t h; uint32_t h;
tb_page_addr_t phys_pc; tb_page_addr_t phys_pc;
atomic_set(&tb->cflags, tb->cflags | CF_INVALID); qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
/* remove the TB from the hash list */ /* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
@ -1167,8 +1167,8 @@ void tb_phys_invalidate(struct uc_struct *uc,
/* remove the TB from the hash list */ /* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc); h = tb_jmp_cache_hash_func(tb->pc);
if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
atomic_set(&cpu->tb_jmp_cache[h], NULL); qatomic_set(&cpu->tb_jmp_cache[h], NULL);
} }
/* suppress this TB from the two jump lists */ /* suppress this TB from the two jump lists */
@ -1936,7 +1936,7 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
} }
} }
@ -2019,7 +2019,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
{ {
cpu->interrupt_request |= mask; cpu->interrupt_request |= mask;
cpu->tcg_exit_req = 1; cpu->tcg_exit_req = 1;
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
} }
#if 0 #if 0

View file

@ -191,7 +191,7 @@ static bool tcg_exec_all(struct uc_struct* uc)
} }
if (uc->cpu && uc->cpu->exit_request) { if (uc->cpu && uc->cpu->exit_request) {
atomic_mb_set(&uc->cpu->exit_request, 0); qatomic_mb_set(&uc->cpu->exit_request, 0);
} }
return finish; return finish;

View file

@ -320,7 +320,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr, hwaddr addr,
bool resolve_subpage) bool resolve_subpage)
{ {
MemoryRegionSection *section = atomic_read(&d->mru_section); MemoryRegionSection *section = qatomic_read(&d->mru_section);
subpage_t *subpage; subpage_t *subpage;
bool update; bool update;
@ -336,7 +336,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
} }
if (update) { if (update) {
atomic_set(&d->mru_section, section); qatomic_set(&d->mru_section, section);
} }
return section; return section;
} }
@ -391,7 +391,6 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
AddressSpaceDispatch *d = flatview_to_dispatch(fv); AddressSpaceDispatch *d = flatview_to_dispatch(fv);
for (;;) { for (;;) {
// Unicorn: atomic_read used instead of atomic_rcu_read
section = address_space_translate_internal( section = address_space_translate_internal(
flatview_to_dispatch(fv), addr, &addr, flatview_to_dispatch(fv), addr, &addr,
plen, is_mmio); plen, is_mmio);
@ -498,8 +497,8 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
MemTxAttrs attrs, int *prot) MemTxAttrs attrs, int *prot)
{ {
MemoryRegionSection *section; MemoryRegionSection *section;
// Unicorn: atomic_read used instead of atomic_rcu_read // Unicorn: qatomic_read used instead of qatomic_rcu_read
AddressSpaceDispatch *d = atomic_read(&cpu->cpu_ases[asidx].memory_dispatch); AddressSpaceDispatch *d = qatomic_read(&cpu->cpu_ases[asidx].memory_dispatch);
section = address_space_translate_internal(d, addr, xlat, plen, false); section = address_space_translate_internal(d, addr, xlat, plen, false);
@ -1884,8 +1883,8 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
{ {
int asidx = cpu_asidx_from_attrs(cpu, attrs); int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
// Unicorn: uses atomic_read instead of atomic_rcu_read // Unicorn: uses qatomic_read instead of qatomic_rcu_read
AddressSpaceDispatch *d = atomic_read(&cpuas->memory_dispatch); AddressSpaceDispatch *d = qatomic_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections; MemoryRegionSection *sections = d->map.sections;
return &sections[index & ~TARGET_PAGE_MASK]; return &sections[index & ~TARGET_PAGE_MASK];
@ -1931,8 +1930,8 @@ static void tcg_commit(MemoryListener *listener)
* may have split the RCU critical section. * may have split the RCU critical section.
*/ */
d = address_space_to_dispatch(cpuas->as); d = address_space_to_dispatch(cpuas->as);
// Unicorn: atomic_set used instead of atomic_rcu_set // Unicorn: qatomic_set used instead of qatomic_rcu_set
atomic_set(&cpuas->memory_dispatch, d); qatomic_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu); tlb_flush(cpuas->cpu);
} }
@ -2356,7 +2355,7 @@ void *address_space_map(AddressSpace *as,
l = len; l = len;
mr = flatview_translate(fv, addr, &xlat, &l, is_write); mr = flatview_translate(fv, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) { if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&as->uc->bounce.in_use, true)) { if (qatomic_xchg(&as->uc->bounce.in_use, true)) {
return NULL; return NULL;
} }
/* Avoid unbounded allocations */ /* Avoid unbounded allocations */
@ -2406,7 +2405,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
qemu_vfree(as->uc->bounce.buffer); qemu_vfree(as->uc->bounce.buffer);
as->uc->bounce.buffer = NULL; as->uc->bounce.buffer = NULL;
memory_region_unref(as->uc->bounce.mr); memory_region_unref(as->uc->bounce.mr);
atomic_mb_set(&as->uc->bounce.in_use, false); qatomic_mb_set(&as->uc->bounce.in_use, false);
} }
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr,

View file

@ -95,7 +95,7 @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
#if TCG_OVERSIZED_GUEST #if TCG_OVERSIZED_GUEST
return entry->addr_write; return entry->addr_write;
#else #else
return atomic_read(&entry->addr_write); return qatomic_read(&entry->addr_write);
#endif #endif
} }

View file

@ -341,10 +341,10 @@ struct TranslationBlock {
uintptr_t jmp_list_first; uintptr_t jmp_list_first;
}; };
/* Hide the atomic_read to make code a little easier on the eyes */ /* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb) static inline uint32_t tb_cflags(const TranslationBlock *tb)
{ {
return atomic_read(&tb->cflags); return qatomic_read(&tb->cflags);
} }
/* current cflags for hashing/comparison */ /* current cflags for hashing/comparison */

View file

@ -93,8 +93,8 @@ static inline bool cpu_physical_memory_get_dirty(struct uc_struct *uc, ram_addr_
// Unicorn: commented out // Unicorn: commented out
//rcu_read_lock(); //rcu_read_lock();
// Unicorn: atomic_read used instead of atomic_rcu_read // Unicorn: qatomic_read used instead of qatomic_rcu_read
blocks = atomic_read(&uc->ram_list.dirty_memory[client]); blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE; idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE; offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@ -138,8 +138,8 @@ static inline bool cpu_physical_memory_all_dirty(struct uc_struct *uc, ram_addr_
// Unicorn: commented out // Unicorn: commented out
//rcu_read_lock(); //rcu_read_lock();
// Unicorn: atomic_read used instead of atomic_rcu_read // Unicorn: qatomic_read used instead of qatomic_rcu_read
blocks = atomic_read(&uc->ram_list.dirty_memory[client]); blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE; idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE; offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@ -192,8 +192,8 @@ static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_
// Unicorn: commented out // Unicorn: commented out
//rcu_read_lock(); //rcu_read_lock();
// Unicorn: atomic_read used instead of atomic_rcu_read // Unicorn: qatomic_read used instead of qatomic_rcu_read
blocks = atomic_read(&uc->ram_list.dirty_memory[client]); blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]); set_bit_atomic(offset, blocks->blocks[idx]);

View file

@ -27,7 +27,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
cpu_get_tb_cpu_state(env, pc, cs_base, flags); cpu_get_tb_cpu_state(env, pc, cs_base, flags);
hash = tb_jmp_cache_hash_func(*pc); hash = tb_jmp_cache_hash_func(*pc);
tb = atomic_read(&cpu->tb_jmp_cache[hash]); tb = qatomic_read(&cpu->tb_jmp_cache[hash]);
if (likely(tb && if (likely(tb &&
tb->pc == *pc && tb->pc == *pc &&
tb->cs_base == *cs_base && tb->cs_base == *cs_base &&
@ -39,7 +39,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
if (tb == NULL) { if (tb == NULL) {
return NULL; return NULL;
} }
atomic_set(&cpu->tb_jmp_cache[hash], tb); qatomic_set(&cpu->tb_jmp_cache[hash], tb);
return tb; return tb;
} }

View file

@ -133,49 +133,49 @@ void _ReadWriteBarrier(void);
* no effect on the generated code but not using the atomic primitives * no effect on the generated code but not using the atomic primitives
* will get flagged by sanitizers as a violation. * will get flagged by sanitizers as a violation.
*/ */
#define atomic_read__nocheck(ptr) \ #define qatomic_read__nocheck(ptr) \
__atomic_load_n(ptr, __ATOMIC_RELAXED) __atomic_load_n(ptr, __ATOMIC_RELAXED)
#define atomic_read(ptr) \ #define qatomic_read(ptr) \
({ \ ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_read__nocheck(ptr); \ qatomic_read__nocheck(ptr); \
}) })
#define atomic_set__nocheck(ptr, i) \ #define qatomic_set__nocheck(ptr, i) \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED) __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
#define atomic_set(ptr, i) do { \ #define qatomic_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_set__nocheck(ptr, i); \ qatomic_set__nocheck(ptr, i); \
} while(0) } while(0)
/* See above: most compilers currently treat consume and acquire the /* See above: most compilers currently treat consume and acquire the
* same, but this slows down atomic_rcu_read unnecessarily. * same, but this slows down qatomic_rcu_read unnecessarily.
*/ */
#ifdef __SANITIZE_THREAD__ #ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr) \ #define qatomic_rcu_read__nocheck(ptr, valptr) \
__atomic_load(ptr, valptr, __ATOMIC_CONSUME); __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else #else
#define atomic_rcu_read__nocheck(ptr, valptr) \ #define qatomic_rcu_read__nocheck(ptr, valptr) \
__atomic_load(ptr, valptr, __ATOMIC_RELAXED); \ __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
smp_read_barrier_depends(); smp_read_barrier_depends();
#endif #endif
#define atomic_rcu_read(ptr) \ #define qatomic_rcu_read(ptr) \
({ \ ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \ typeof_strip_qual(*ptr) _val; \
atomic_rcu_read__nocheck(ptr, &_val); \ qatomic_rcu_read__nocheck(ptr, &_val); \
_val; \ _val; \
}) })
#define atomic_rcu_set(ptr, i) do { \ #define qatomic_rcu_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0) } while(0)
#define atomic_load_acquire(ptr) \ #define qatomic_load_acquire(ptr) \
({ \ ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \ typeof_strip_qual(*ptr) _val; \
@ -183,60 +183,60 @@ void _ReadWriteBarrier(void);
_val; \ _val; \
}) })
#define atomic_store_release(ptr, i) do { \ #define qatomic_store_release(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0) } while(0)
/* All the remaining operations are fully sequentially consistent */ /* All the remaining operations are fully sequentially consistent */
#define atomic_xchg__nocheck(ptr, i) ({ \ #define qatomic_xchg__nocheck(ptr, i) ({ \
__atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \ __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \
}) })
#define atomic_xchg(ptr, i) ({ \ #define qatomic_xchg(ptr, i) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_xchg__nocheck(ptr, i); \ qatomic_xchg__nocheck(ptr, i); \
}) })
/* Returns the eventual value, failed or not */ /* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \ #define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \
typeof_strip_qual(*ptr) _old = (old); \ typeof_strip_qual(*ptr) _old = (old); \
(void)__atomic_compare_exchange_n(ptr, &_old, new, false, \ (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \ _old; \
}) })
#define atomic_cmpxchg(ptr, old, new) ({ \ #define qatomic_cmpxchg(ptr, old, new) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_cmpxchg__nocheck(ptr, old, new); \ qatomic_cmpxchg__nocheck(ptr, old, new); \
}) })
/* Provide shorter names for GCC atomic builtins, return old value */ /* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) #define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST) #define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) #define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) #define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) #define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
/* And even shorter names that return void. */ /* And even shorter names that return void. */
#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) #define qatomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)) #define qatomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)) #define qatomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) #define qatomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) #define qatomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) #define qatomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) #define qatomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
#else /* __ATOMIC_RELAXED */ #else /* __ATOMIC_RELAXED */
@ -286,7 +286,7 @@ void _ReadWriteBarrier(void);
* but it is a full barrier at the hardware level. Add a compiler barrier * but it is a full barrier at the hardware level. Add a compiler barrier
* to make it a full barrier also at the compiler level. * to make it a full barrier also at the compiler level.
*/ */
#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) #define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
#elif defined(_ARCH_PPC) #elif defined(_ARCH_PPC)
@ -339,11 +339,11 @@ void _ReadWriteBarrier(void);
/* These will only be atomic if the processor does the fetch or store /* These will only be atomic if the processor does the fetch or store
* in a single issue memory operation * in a single issue memory operation
*/ */
#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p)) #define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i)) #define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
#define atomic_read(ptr) atomic_read__nocheck(ptr) #define qatomic_read(ptr) qatomic_read__nocheck(ptr)
#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i) #define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)
/** /**
* atomic_rcu_read - reads a RCU-protected pointer to a local variable * atomic_rcu_read - reads a RCU-protected pointer to a local variable
@ -363,8 +363,8 @@ void _ReadWriteBarrier(void);
* *
* Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg(). * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
*/ */
#define atomic_rcu_read(ptr) ({ \ #define qatomic_rcu_read(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \ typeof(*ptr) _val = qatomic_read(ptr); \
smp_read_barrier_depends(); \ smp_read_barrier_depends(); \
_val; \ _val; \
}) })
@ -380,94 +380,71 @@ void _ReadWriteBarrier(void);
* *
* Should match atomic_rcu_read(). * Should match atomic_rcu_read().
*/ */
#define atomic_rcu_set(ptr, i) do { \ #define qatomic_rcu_set(ptr, i) do { \
smp_wmb(); \ smp_wmb(); \
atomic_set(ptr, i); \ qatomic_set(ptr, i); \
} while (0) } while (0)
#define atomic_load_acquire(ptr) ({ \ #define qatomic_load_acquire(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \ typeof(*ptr) _val = qatomic_read(ptr); \
smp_mb_acquire(); \ smp_mb_acquire(); \
_val; \ _val; \
}) })
#define atomic_store_release(ptr, i) do { \ #define qatomic_store_release(ptr, i) do { \
smp_mb_release(); \ smp_mb_release(); \
atomic_set(ptr, i); \ qatomic_set(ptr, i); \
} while (0) } while (0)
#ifndef atomic_xchg #ifndef qatomic_xchg
#if defined(__clang__) #if defined(__clang__)
#define atomic_xchg(ptr, i) __sync_swap(ptr, i) #define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
#else #else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ /* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) #define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif #endif
#endif #endif
#define atomic_xchg__nocheck atomic_xchg #define qatomic_xchg__nocheck qatomic_xchg
/* Provide shorter names for GCC atomic builtins. */ /* Provide shorter names for GCC atomic builtins. */
#ifdef _MSC_VER
// these return the new value (so we make it return the previous value)
#define atomic_fetch_inc(ptr) ((InterlockedIncrement(ptr))-1)
#define atomic_fetch_dec(ptr) ((InterlockedDecrement(ptr))+1)
#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n))-n)
#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n))+n)
#define atomic_fetch_and(ptr, n) ((InterlockedAnd(ptr, n)))
#define atomic_fetch_or(ptr, n) ((InterlockedOr(ptr, n)))
#define atomic_fetch_xor(ptr, n) ((InterlockedXor(ptr, n)))
#define atomic_inc_fetch(ptr) (InterlockedIncrement((long*)(ptr)))
#define atomic_dec_fetch(ptr) (InterlockedDecrement((long*)(ptr)))
#define atomic_add_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) + n)
#define atomic_sub_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) - n)
#define atomic_and_fetch(ptr, n) (InterlockedAnd((long*)ptr, n) & n)
#define atomic_or_fetch(ptr, n) (InterlockedOr((long*)ptr, n) | n)
#define atomic_xor_fetch(ptr, n) (InterlockedXor((long*)ptr, n) ^ n)
#define atomic_cmpxchg(ptr, old, new) ((InterlockedCompareExchange(ptr, old, new)))
#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
#else
// these return the previous value // these return the previous value
#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) #define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) #define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) #define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) #define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) #define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) #define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) #define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new) #define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) #define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#endif #define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)
/* And even shorter names that return void. */ /* And even shorter names that return void. */
#ifdef _MSC_VER #define qatomic_inc(ptr) \
#define atomic_inc(ptr) ((void) InterlockedIncrement(ptr)) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr) ((void) InterlockedDecrement(ptr)) #define qatomic_dec(ptr) \
#define atomic_add(ptr, n) ((void) InterlockedAdd(ptr, n)) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) InterlockedAdd(ptr, -n)) #define qatomic_add(ptr, n) \
#define atomic_and(ptr, n) ((void) InterlockedAnd(ptr, n)) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n) ((void) InterlockedOr(ptr, n)) #define qatomic_sub(ptr, n) \
#define atomic_xor(ptr, n) ((void) InterlockedXor(ptr, n)) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#else #define qatomic_and(ptr, n) \
#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) #define qatomic_or(ptr, n) \
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) #define qatomic_xor(ptr, n) \
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
#endif
#endif /* __ATOMIC_RELAXED */ #endif /* __ATOMIC_RELAXED */
@ -481,11 +458,11 @@ void _ReadWriteBarrier(void);
/* This is more efficient than a store plus a fence. */ /* This is more efficient than a store plus a fence. */
#if !defined(__SANITIZE_THREAD__) #if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) #define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
#endif #endif
#endif #endif
/* atomic_mb_read/set semantics map Java volatile variables. They are /* qatomic_mb_read/set semantics map Java volatile variables. They are
* less expensive on some platforms (notably POWER) than fully * less expensive on some platforms (notably POWER) than fully
* sequentially consistent operations. * sequentially consistent operations.
* *
@ -493,14 +470,14 @@ void _ReadWriteBarrier(void);
* use. See docs/atomic.txt for more discussion. * use. See docs/atomic.txt for more discussion.
*/ */
#ifndef atomic_mb_read #ifndef qatomic_mb_read
#define atomic_mb_read(ptr) \ #define qatomic_mb_read(ptr) \
atomic_load_acquire(ptr) qatomic_load_acquire(ptr)
#endif #endif
#ifndef atomic_mb_set #ifndef qatomic_mb_set
#define atomic_mb_set(ptr, i) do { \ #define qatomic_mb_set(ptr, i) do { \
atomic_store_release(ptr, i); \ qatomic_store_release(ptr, i); \
smp_mb(); \ smp_mb(); \
} while(0) } while(0)
#endif #endif
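
As an aside, a minimal sketch (hypothetical code, not part of this patch)
of how the renamed primitives combine in a typical read/compare-and-swap
update loop, mirroring the pattern used by riscv_cpu_update_mip() further
down:

    #include <stdint.h>
    #include "qemu/atomic.h"

    /* Set bits in a word shared with other threads: read once, then
     * retry the CAS until no other thread changed the word meanwhile. */
    static void set_flags(uint32_t *flags, uint32_t mask)
    {
        uint32_t old, new, cmp = qatomic_read(flags);

        do {
            old = cmp;
            new = old | mask;
            cmp = qatomic_cmpxchg(flags, old, new);
        } while (cmp != old);
    }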

View file

@ -40,20 +40,20 @@
*/ */
#if defined(CONFIG_ATOMIC128) #if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{ {
return atomic_cmpxchg__nocheck(ptr, cmp, new); return qatomic_cmpxchg__nocheck(ptr, cmp, new);
} }
# define HAVE_CMPXCHG128 1 # define HAVE_CMPXCHG128 1
#elif defined(CONFIG_CMPXCHG128) #elif defined(CONFIG_CMPXCHG128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{ {
return __sync_val_compare_and_swap_16(ptr, cmp, new); return __sync_val_compare_and_swap_16(ptr, cmp, new);
} }
# define HAVE_CMPXCHG128 1 # define HAVE_CMPXCHG128 1
#elif defined(__aarch64__) #elif defined(__aarch64__)
/* Through gcc 8, aarch64 has no support for 128-bit at all. */ /* Through gcc 8, aarch64 has no support for 128-bit at all. */
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{ {
uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
uint64_t newl = int128_getlo(new), newh = int128_gethi(new); uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
@ -79,26 +79,26 @@ static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
#else #else
/* Fallback definition that must be optimized away, or error. */ /* Fallback definition that must be optimized away, or error. */
Int128 QEMU_ERROR("unsupported atomic") Int128 QEMU_ERROR("unsupported atomic")
atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new); qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
# define HAVE_CMPXCHG128 0 # define HAVE_CMPXCHG128 0
#endif /* Some definition for HAVE_CMPXCHG128 */ #endif /* Some definition for HAVE_CMPXCHG128 */
#if defined(CONFIG_ATOMIC128) #if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_read(Int128 *ptr) static inline Int128 qatomic16_read(Int128 *ptr)
{ {
return atomic_read__nocheck(ptr); return qatomic_read__nocheck(ptr);
} }
static inline void atomic16_set(Int128 *ptr, Int128 val) static inline void qatomic16_set(Int128 *ptr, Int128 val)
{ {
atomic_set__nocheck(ptr, val); qatomic_set__nocheck(ptr, val);
} }
# define HAVE_ATOMIC128 1 # define HAVE_ATOMIC128 1
#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__) #elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
/* We can do better than cmpxchg for AArch64. */ /* We can do better than cmpxchg for AArch64. */
static inline Int128 atomic16_read(Int128 *ptr) static inline Int128 qatomic16_read(Int128 *ptr)
{ {
uint64_t l, h; uint64_t l, h;
uint32_t tmp; uint32_t tmp;
@ -112,7 +112,7 @@ static inline Int128 atomic16_read(Int128 *ptr)
return int128_make128(l, h); return int128_make128(l, h);
} }
static inline void atomic16_set(Int128 *ptr, Int128 val) static inline void qatomic16_set(Int128 *ptr, Int128 val)
{ {
uint64_t l = int128_getlo(val), h = int128_gethi(val); uint64_t l = int128_getlo(val), h = int128_gethi(val);
uint64_t t1, t2; uint64_t t1, t2;
@ -127,26 +127,26 @@ static inline void atomic16_set(Int128 *ptr, Int128 val)
# define HAVE_ATOMIC128 1 # define HAVE_ATOMIC128 1
#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128 #elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
static inline Int128 atomic16_read(Int128 *ptr) static inline Int128 qatomic16_read(Int128 *ptr)
{ {
/* Maybe replace 0 with 0, returning the old value. */ /* Maybe replace 0 with 0, returning the old value. */
return atomic16_cmpxchg(ptr, 0, 0); return qatomic16_cmpxchg(ptr, 0, 0);
} }
static inline void atomic16_set(Int128 *ptr, Int128 val) static inline void qatomic16_set(Int128 *ptr, Int128 val)
{ {
Int128 old = *ptr, cmp; Int128 old = *ptr, cmp;
do { do {
cmp = old; cmp = old;
old = atomic16_cmpxchg(ptr, cmp, val); old = qatomic16_cmpxchg(ptr, cmp, val);
} while (old != cmp); } while (old != cmp);
} }
# define HAVE_ATOMIC128 1 # define HAVE_ATOMIC128 1
#else #else
/* Fallback definitions that must be optimized away, or error. */ /* Fallback definitions that must be optimized away, or error. */
Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr); Int128 QEMU_ERROR("unsupported atomic") qatomic16_read(Int128 *ptr);
void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val); void QEMU_ERROR("unsupported atomic") qatomic16_set(Int128 *ptr, Int128 val);
# define HAVE_ATOMIC128 0 # define HAVE_ATOMIC128 0
#endif /* Some definition for HAVE_ATOMIC128 */ #endif /* Some definition for HAVE_ATOMIC128 */

View file

@ -52,7 +52,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = addr + BIT_WORD(nr); unsigned long *p = addr + BIT_WORD(nr);
atomic_or(p, mask); qatomic_or(p, mask);
} }
/** /**

View file

@ -429,7 +429,7 @@ static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
unsigned int i; unsigned int i;
for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
atomic_set(&cpu->tb_jmp_cache[i], NULL); qatomic_set(&cpu->tb_jmp_cache[i], NULL);
} }
} }

View file

@ -334,12 +334,12 @@ static void flatview_destroy(FlatView *view)
static void flatview_ref(FlatView *view) static void flatview_ref(FlatView *view)
{ {
atomic_inc(&view->ref); qatomic_inc(&view->ref);
} }
static void flatview_unref(FlatView *view) static void flatview_unref(FlatView *view)
{ {
if (atomic_fetch_dec(&view->ref) == 1) { if (qatomic_fetch_dec(&view->ref) == 1) {
flatview_destroy(view); flatview_destroy(view);
} }
} }
@ -355,8 +355,8 @@ void unicorn_free_empty_flat_view(struct uc_struct *uc)
FlatView *address_space_to_flatview(AddressSpace *as) FlatView *address_space_to_flatview(AddressSpace *as)
{ {
// Unicorn: atomic_read used instead of atomic_rcu_read // Unicorn: qatomic_read used instead of qatomic_rcu_read
return atomic_read(&as->current_map); return qatomic_read(&as->current_map);
} }
AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv) AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
@ -900,7 +900,7 @@ static void address_space_set_flatview(AddressSpace *as)
} }
/* Writes are protected by the BQL. */ /* Writes are protected by the BQL. */
atomic_set(&as->current_map, new_view); qatomic_set(&as->current_map, new_view);
if (old_view) { if (old_view) {
flatview_unref(old_view); flatview_unref(old_view);
} }

View file

@ -87,11 +87,11 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
void cpu_exit(CPUState *cpu) void cpu_exit(CPUState *cpu)
{ {
atomic_set(&cpu->exit_request, 1); qatomic_set(&cpu->exit_request, 1);
/* Ensure cpu_exec will see the exit request after TCG has exited. */ /* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb(); smp_wmb();
atomic_set(&cpu->tcg_exit_req, 1); qatomic_set(&cpu->tcg_exit_req, 1);
atomic_set(&cpu->icount_decr_ptr->u16.high, -1); qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
} }
static void cpu_common_noop(CPUState *cpu) static void cpu_common_noop(CPUState *cpu)
@ -146,7 +146,7 @@ static void cpu_common_reset(CPUState *cpu)
cpu->mem_io_pc = 0; cpu->mem_io_pc = 0;
cpu->mem_io_vaddr = 0; cpu->mem_io_vaddr = 0;
cpu->icount_extra = 0; cpu->icount_extra = 0;
atomic_set(&cpu->icount_decr_ptr->u32, 0); qatomic_set(&cpu->icount_decr_ptr->u32, 0);
cpu->can_do_io = 0; cpu->can_do_io = 0;
cpu->exception_index = -1; cpu->exception_index = -1;
cpu->crash_occurred = false; cpu->crash_occurred = false;

View file

@ -547,7 +547,7 @@ Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char
Object *inst; Object *inst;
for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) { for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) {
if (atomic_read(&obj->class->object_cast_cache[i]) == typename) { if (qatomic_read(&obj->class->object_cast_cache[i]) == typename) {
goto out; goto out;
} }
} }
@ -564,10 +564,10 @@ Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char
if (obj && obj == inst) { if (obj && obj == inst) {
for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
atomic_set(&obj->class->object_cast_cache[i - 1], qatomic_set(&obj->class->object_cast_cache[i - 1],
atomic_read(&obj->class->object_cast_cache[i])); qatomic_read(&obj->class->object_cast_cache[i]));
} }
atomic_set(&obj->class->object_cast_cache[i - 1], typename); qatomic_set(&obj->class->object_cast_cache[i - 1], typename);
} }
out: out:
@ -634,7 +634,7 @@ ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass
int i; int i;
for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) { for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) {
if (atomic_read(&class->class_cast_cache[i]) == typename) { if (qatomic_read(&class->class_cast_cache[i]) == typename) {
ret = class; ret = class;
goto out; goto out;
} }
@ -655,10 +655,10 @@ ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass
#ifdef CONFIG_QOM_CAST_DEBUG #ifdef CONFIG_QOM_CAST_DEBUG
if (class && ret == class) { if (class && ret == class) {
for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
atomic_set(&class->class_cast_cache[i - 1], qatomic_set(&class->class_cast_cache[i - 1],
atomic_read(&class->class_cast_cache[i])); qatomic_read(&class->class_cast_cache[i]));
} }
atomic_set(&class->class_cast_cache[i - 1], typename); qatomic_set(&class->class_cast_cache[i - 1], typename);
} }
out: out:
#endif #endif
@ -826,7 +826,7 @@ void object_ref(Object *obj)
if (!obj) { if (!obj) {
return; return;
} }
atomic_inc(&obj->ref); qatomic_inc(&obj->ref);
} }
void object_unref(struct uc_struct *uc, Object *obj) void object_unref(struct uc_struct *uc, Object *obj)
@ -837,7 +837,7 @@ void object_unref(struct uc_struct *uc, Object *obj)
g_assert(obj->ref > 0); g_assert(obj->ref > 0);
/* parent always holds a reference to its children */ /* parent always holds a reference to its children */
if (atomic_fetch_dec(&obj->ref) == 1) { if (qatomic_fetch_dec(&obj->ref) == 1) {
object_finalize(uc, obj); object_finalize(uc, obj);
} }
} }

View file

@ -314,11 +314,11 @@ static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag) static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{ {
int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4; int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
uint8_t old = atomic_read(mem); uint8_t old = qatomic_read(mem);
while (1) { while (1) {
uint8_t new = deposit32(old, ofs, 4, tag); uint8_t new = deposit32(old, ofs, 4, tag);
uint8_t cmp = atomic_cmpxchg(mem, old, new); uint8_t cmp = qatomic_cmpxchg(mem, old, new);
if (likely(cmp == old)) { if (likely(cmp == old)) {
return; return;
} }
@ -399,7 +399,7 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra); 2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
if (mem1) { if (mem1) {
tag |= tag << 4; tag |= tag << 4;
atomic_set(mem1, tag); qatomic_set(mem1, tag);
} }
} }
} }

View file

@ -70,7 +70,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
uint64_t *haddr = g2h(a0); uint64_t *haddr = g2h(a0);
cmpv = cpu_to_le64(cmpv); cmpv = cpu_to_le64(cmpv);
newv = cpu_to_le64(newv); newv = cpu_to_le64(newv);
oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv); oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
oldv = le64_to_cpu(oldv); oldv = le64_to_cpu(oldv);
} }
#else #else

View file

@ -229,7 +229,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f,
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid); cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus); cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ", cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ",
(target_ulong)atomic_read(&env->mip)); (target_ulong)qatomic_read(&env->mip));
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie); cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg); cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg); cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
@ -280,7 +280,7 @@ static bool riscv_cpu_has_work(CPUState *cs)
* Definition of the WFI instruction requires it to ignore the privilege * Definition of the WFI instruction requires it to ignore the privilege
* mode and delegation registers, but respect individual enables * mode and delegation registers, but respect individual enables
*/ */
return (atomic_read(&env->mip) & env->mie) != 0; return (qatomic_read(&env->mip) & env->mie) != 0;
#else #else
return true; return true;
#endif #endif

View file

@ -148,12 +148,12 @@ struct CPURISCVState {
/* /*
* CAUTION! Unlike the rest of this struct, mip is accessed asynchonously * CAUTION! Unlike the rest of this struct, mip is accessed asynchonously
* by I/O threads. It should be read with atomic_read. It should be updated * by I/O threads. It should be read with qatomic_read. It should be updated
* using riscv_cpu_update_mip with the iothread mutex held. The iothread * using riscv_cpu_update_mip with the iothread mutex held. The iothread
* mutex must be held because mip must be consistent with the CPU inturrept * mutex must be held because mip must be consistent with the CPU inturrept
* state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt * state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
* wuth the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero. * wuth the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
* mip is 32-bits to allow atomic_read on 32-bit hosts. * mip is 32-bits to allow qatomic_read on 32-bit hosts.
*/ */
target_ulong mip; target_ulong mip;

View file

@ -253,12 +253,12 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{ {
CPURISCVState *env = &cpu->env; CPURISCVState *env = &cpu->env;
uint32_t old, new, cmp = atomic_read(&env->mip); uint32_t old, new, cmp = qatomic_read(&env->mip);
do { do {
old = cmp; old = cmp;
new = (old & ~mask) | (value & mask); new = (old & ~mask) | (value & mask);
cmp = atomic_cmpxchg(&env->mip, old, new); cmp = qatomic_cmpxchg(&env->mip, old, new);
} while (old != cmp); } while (old != cmp);
if (new) { if (new) {
@ -531,7 +531,7 @@ restart:
*pte_pa = pte = updated_pte; *pte_pa = pte = updated_pte;
#else #else
target_ulong old_pte = target_ulong old_pte =
atomic_cmpxchg(pte_pa, pte, updated_pte); qatomic_cmpxchg(pte_pa, pte, updated_pte);
if (old_pte != pte) { if (old_pte != pte) {
goto restart; goto restart;
} else { } else {

View file

@ -652,7 +652,7 @@ static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value,
old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask)); old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
//qemu_mutex_unlock_iothread(); //qemu_mutex_unlock_iothread();
} else { } else {
old_mip = atomic_read(&env->mip); old_mip = qatomic_read(&env->mip);
} }
if (ret_value) { if (ret_value) {

View file

@ -1317,7 +1317,7 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd; i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
} }
pair = (uint64_t)i2 << 32 | i1; pair = (uint64_t)i2 << 32 | i1;
atomic_set((uint64_t *)jmp_addr, pair); qatomic_set((uint64_t *)jmp_addr, pair);
flush_icache_range(jmp_addr, jmp_addr + 8); flush_icache_range(jmp_addr, jmp_addr + 8);
} }

View file

@ -251,7 +251,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
uintptr_t jmp_addr, uintptr_t addr) uintptr_t jmp_addr, uintptr_t addr)
{ {
/* patch the branch destination */ /* patch the branch destination */
atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
/* no need to flush icache explicitly */ /* no need to flush icache explicitly */
} }

View file

@ -1971,7 +1971,7 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
s->nb_ops--; s->nb_ops--;
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1); qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif #endif
} }
@ -3772,15 +3772,15 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
QTAILQ_FOREACH(op, &s->ops, link) { QTAILQ_FOREACH(op, &s->ops, link) {
n++; n++;
} }
atomic_set(&prof->op_count, prof->op_count + n); qatomic_set(&prof->op_count, prof->op_count + n);
if (n > prof->op_count_max) { if (n > prof->op_count_max) {
atomic_set(&prof->op_count_max, n); qatomic_set(&prof->op_count_max, n);
} }
n = s->nb_temps; n = s->nb_temps;
atomic_set(&prof->temp_count, prof->temp_count + n); qatomic_set(&prof->temp_count, prof->temp_count + n);
if (n > prof->temp_count_max) { if (n > prof->temp_count_max) {
atomic_set(&prof->temp_count_max, n); qatomic_set(&prof->temp_count_max, n);
} }
} }
#endif #endif
@ -3812,7 +3812,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif #endif
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
atomic_set(&prof->opt_time, prof->opt_time - profile_getclock()); qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif #endif
#ifdef USE_TCG_OPTIMIZATIONS #ifdef USE_TCG_OPTIMIZATIONS
@ -3820,8 +3820,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif #endif
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
atomic_set(&prof->opt_time, prof->opt_time + profile_getclock()); qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
atomic_set(&prof->la_time, prof->la_time - profile_getclock()); qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif #endif
reachable_code_pass(s); reachable_code_pass(s);
@ -3844,7 +3844,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
} }
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
atomic_set(&prof->la_time, prof->la_time + profile_getclock()); qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif #endif
#ifdef DEBUG_DISAS #ifdef DEBUG_DISAS
@ -3873,7 +3873,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
TCGOpcode opc = op->opc; TCGOpcode opc = op->opc;
#ifdef CONFIG_PROFILER #ifdef CONFIG_PROFILER
atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1); qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif #endif
switch (opc) { switch (opc) {

View file

@ -190,7 +190,7 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
/* First word */ /* First word */
if (nr - bits_to_set > 0) { if (nr - bits_to_set > 0) {
atomic_or(p, mask_to_set); qatomic_or(p, mask_to_set);
nr -= bits_to_set; nr -= bits_to_set;
bits_to_set = BITS_PER_LONG; bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL; mask_to_set = ~0UL;
@ -209,9 +209,9 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
/* Last word */ /* Last word */
if (nr) { if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size); mask_to_set &= BITMAP_LAST_WORD_MASK(size);
atomic_or(p, mask_to_set); qatomic_or(p, mask_to_set);
} else { } else {
/* If we avoided the full barrier in atomic_or(), issue a /* If we avoided the full barrier in qatomic_or(), issue a
* barrier to account for the assignments in the while loop. * barrier to account for the assignments in the while loop.
*/ */
smp_mb(); smp_mb();
@ -253,7 +253,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
/* First word */ /* First word */
if (nr - bits_to_clear > 0) { if (nr - bits_to_clear > 0) {
old_bits = atomic_fetch_and(p, ~mask_to_clear); old_bits = qatomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear; dirty |= old_bits & mask_to_clear;
nr -= bits_to_clear; nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG; bits_to_clear = BITS_PER_LONG;
@ -265,7 +265,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
if (bits_to_clear == BITS_PER_LONG) { if (bits_to_clear == BITS_PER_LONG) {
while (nr >= BITS_PER_LONG) { while (nr >= BITS_PER_LONG) {
if (*p) { if (*p) {
old_bits = atomic_xchg(p, 0); old_bits = qatomic_xchg(p, 0);
dirty |= old_bits; dirty |= old_bits;
} }
nr -= BITS_PER_LONG; nr -= BITS_PER_LONG;
@ -276,7 +276,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
/* Last word */ /* Last word */
if (nr) { if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size); mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
old_bits = atomic_fetch_and(p, ~mask_to_clear); old_bits = qatomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear; dirty |= old_bits & mask_to_clear;
} else { } else {
if (!dirty) { if (!dirty) {
@ -291,7 +291,7 @@ void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr) long nr)
{ {
while (nr > 0) { while (nr > 0) {
*dst = atomic_xchg(src, 0); *dst = qatomic_xchg(src, 0);
dst++; dst++;
src++; src++;
nr -= BITS_PER_LONG; nr -= BITS_PER_LONG;