Mirror of https://github.com/yuzu-emu/unicorn.git, synced 2024-12-22 20:45:29 +00:00
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type pointer
argument. QEMU uses direct types (int, etc.) and this causes a compiler error
when QEMU code calls these functions in a source file that also included
<stdatomic.h> via a system header file:

  $ CC=clang CXX=clang++ ./configure ... && make
  ../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)

Avoid using atomic_*() names in QEMU's atomic.h since that namespace is used
by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h and
<stdatomic.h> can co-exist. I checked /usr/include on my machine and searched
GitHub for existing "qatomic_" users, but there seem to be none.

This patch was generated using:

  $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
    sort -u >/tmp/changed_identifiers
  $ for identifier in $(</tmp/changed_identifiers); do
        sed -i "s%\<$identifier\>%q$identifier%g" \
            $(git grep -I -l "\<$identifier\>")
    done

I manually fixed line-wrap issues and misaligned rST tables.

Backports d73415a315471ac0b127ed3fad45c8ec5d711de1
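To make the failure mode concrete, here is a minimal sketch of the clash the message describes. It is not part of this patch; the file name and the counter variable are made up for illustration. Once <stdatomic.h> is visible, clang's atomic_fetch_add() is the C11 generic that demands an _Atomic-qualified pointer, while the renamed qatomic_fetch_add() keeps working on plain types because, as the diff below shows, it expands to __atomic_fetch_add().

    /* clash_sketch.c -- hypothetical repro, not taken from the QEMU/unicorn tree */
    #include <stdatomic.h>

    static unsigned int counter;    /* plain (non-_Atomic) type, as QEMU uses */

    int main(void)
    {
        /* With clang, the C11 generic rejects a plain pointer:
         *   error: address argument to atomic operation must be a pointer
         *   to _Atomic type ('unsigned int *' invalid)
         * so the offending call is left commented out here.
         */
        /* atomic_fetch_add(&counter, 1); */

        /* What qatomic_fetch_add(&counter, 1) expands to in the patch
         * (__atomic_fetch_add with __ATOMIC_SEQ_CST, per the diff below);
         * this builtin accepts plain types and so coexists with <stdatomic.h>.
         */
        __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
        return 0;
    }

The rest of this page is the mechanical rename itself, applied by the sed loop quoted in the commit message.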
This commit is contained in:
Parent: 1341de97f0
Commit: 320b59ddb9
|
@ -68,9 +68,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
|||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
|
||||
DATA_TYPE ret;
|
||||
#if DATA_SIZE == 16
|
||||
ret = atomic16_cmpxchg(haddr, cmpv, newv);
|
||||
ret = qatomic16_cmpxchg(haddr, cmpv, newv);
|
||||
#else
|
||||
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
return ret;
|
||||
|
@ -81,7 +81,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
|||
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
|
||||
{
|
||||
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
|
||||
val = atomic16_read(haddr);
|
||||
val = qatomic16_read(haddr);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
return val;
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
|
|||
ABI_TYPE val EXTRA_ARGS)
|
||||
{
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
|
||||
atomic16_set(haddr, val);
|
||||
qatomic16_set(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
}
|
||||
#endif
|
||||
|
@ -99,7 +99,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
|
|||
ABI_TYPE val EXTRA_ARGS)
|
||||
{
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
|
||||
DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
|
||||
DATA_TYPE ret = qatomic_xchg__nocheck(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
return ret;
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
|||
ABI_TYPE val EXTRA_ARGS) \
|
||||
{ \
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
|
||||
DATA_TYPE ret = atomic_##X(haddr, val); \
|
||||
DATA_TYPE ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
return ret; \
|
||||
}
|
||||
|
@ -136,10 +136,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
|||
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
|
||||
XDATA_TYPE cmp, old, new, val = xval; \
|
||||
smp_mb(); \
|
||||
cmp = atomic_read__nocheck(haddr); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
old = cmp; new = FN(old, val); \
|
||||
cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
} while (cmp != old); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
return RET; \
|
||||
|
@ -177,9 +177,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
|||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
ret = qatomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#else
|
||||
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#endif
|
||||
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
|
@ -191,7 +191,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
|||
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
|
||||
{
|
||||
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
|
||||
val = atomic16_read(haddr);
|
||||
val = qatomic16_read(haddr);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
return BSWAP(val);
|
||||
}
|
||||
|
@ -201,7 +201,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
|
|||
{
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
|
||||
val = BSWAP(val);
|
||||
atomic16_set(haddr, val);
|
||||
qatomic16_set(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
}
|
||||
#endif
|
||||
|
@ -210,7 +210,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
|
|||
ABI_TYPE val EXTRA_ARGS)
|
||||
{
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
|
||||
ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ABI_TYPE ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
return BSWAP(ret);
|
||||
}
|
||||
|
@ -220,7 +220,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
|||
ABI_TYPE val EXTRA_ARGS) \
|
||||
{ \
|
||||
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
|
||||
DATA_TYPE ret = atomic_##X(haddr, BSWAP(val)); \
|
||||
DATA_TYPE ret = qatomic_##X(haddr, BSWAP(val)); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
return BSWAP(ret); \
|
||||
}
|
||||
|
@ -245,10 +245,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
|||
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
|
||||
XDATA_TYPE ldo, ldn, old, new, val = xval; \
|
||||
smp_mb(); \
|
||||
ldn = atomic_read__nocheck(haddr); \
|
||||
ldn = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
|
||||
ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
} while (ldo != ldn); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
return RET; \
|
||||
|
|
|
@ -81,7 +81,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
|
|||
/* We were asked to stop executing TBs (probably a pending
|
||||
* interrupt. We've now stopped, so clear the flag.
|
||||
*/
|
||||
atomic_set(&cpu->tcg_exit_req, 0);
|
||||
qatomic_set(&cpu->tcg_exit_req, 0);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -234,7 +234,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
|
|||
|
||||
mmap_unlock();
|
||||
/* We add the TB in the virtual pc hash table for the fast lookup */
|
||||
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
|
||||
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
|
||||
}
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/* We don't take care of direct jumps when address mapping changes in
|
||||
|
@ -365,7 +365,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
|||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu);
|
||||
|
||||
if (unlikely(atomic_read(&cpu->interrupt_request))) {
|
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request = cpu->interrupt_request;
|
||||
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
|
||||
/* Mask out external interrupts for this step. */
|
||||
|
@ -420,8 +420,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
|||
}
|
||||
|
||||
/* Finally, check if we need to exit to the main loop. */
|
||||
if (unlikely(atomic_read(&cpu->exit_request))) {
|
||||
atomic_set(&cpu->exit_request, 0);
|
||||
if (unlikely(qatomic_read(&cpu->exit_request))) {
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
if (cpu->exception_index == -1) {
|
||||
cpu->exception_index = EXCP_INTERRUPT;
|
||||
}
|
||||
|
@ -459,7 +459,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
|||
#ifdef CONFIG_USER_ONLY
|
||||
abort();
|
||||
#else
|
||||
int insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
int insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
*last_tb = NULL;
|
||||
if (cpu->icount_extra && insns_left >= 0) {
|
||||
/* Refill decrementer and continue execution. */
|
||||
|
@ -539,8 +539,8 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
|
|||
return EXCP_HALTED;
|
||||
}
|
||||
|
||||
atomic_mb_set(&uc->current_cpu, cpu);
|
||||
atomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
|
||||
qatomic_mb_set(&uc->current_cpu, cpu);
|
||||
qatomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
|
||||
|
||||
if (cc->tcg_ops.cpu_exec_enter) {
|
||||
cc->tcg_ops.cpu_exec_enter(cpu);
|
||||
|
|
|
@ -241,8 +241,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
|
|||
#if TCG_OVERSIZED_GUEST
|
||||
tlb_entry->addr_write |= TLB_NOTDIRTY;
|
||||
#else
|
||||
atomic_set(&tlb_entry->addr_write,
|
||||
tlb_entry->addr_write | TLB_NOTDIRTY);
|
||||
qatomic_set(&tlb_entry->addr_write,
|
||||
tlb_entry->addr_write | TLB_NOTDIRTY);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -514,8 +514,8 @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
|
|||
#if TCG_OVERSIZED_GUEST
|
||||
return *(target_ulong *)((uintptr_t)entry + ofs);
|
||||
#else
|
||||
/* ofs might correspond to .addr_write, so use atomic_read */
|
||||
return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
|
||||
/* ofs might correspond to .addr_write, so use qatomic_read */
|
||||
return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -528,7 +528,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
|
|||
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
|
||||
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
|
||||
|
||||
/* elt_ofs might correspond to .addr_write, so use atomic_read */
|
||||
/* elt_ofs might correspond to .addr_write, so use qatomic_read */
|
||||
target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
|
||||
|
||||
if (cmp == page) {
|
||||
|
|
|
@ -494,26 +494,26 @@ static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int
|
|||
|
||||
/* Level 2..N-1. */
|
||||
for (i = uc->v_l2_levels; i > 0; i--) {
|
||||
void **p = atomic_read(lp);
|
||||
void **p = qatomic_read(lp);
|
||||
|
||||
if (p == NULL) {
|
||||
if (!alloc) {
|
||||
return NULL;
|
||||
}
|
||||
p = g_new0(void *, V_L2_SIZE);
|
||||
atomic_set(lp, p);
|
||||
qatomic_set(lp, p);
|
||||
}
|
||||
|
||||
lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
|
||||
}
|
||||
|
||||
pd = atomic_read(lp);
|
||||
pd = qatomic_read(lp);
|
||||
if (pd == NULL) {
|
||||
if (!alloc) {
|
||||
return NULL;
|
||||
}
|
||||
pd = g_new0(PageDesc, V_L2_SIZE);
|
||||
atomic_set(lp, pd);
|
||||
qatomic_set(lp, pd);
|
||||
}
|
||||
|
||||
return pd + (index & (V_L2_SIZE - 1));
|
||||
|
@ -978,7 +978,7 @@ void tb_flush(CPUState *cpu)
|
|||
}
|
||||
|
||||
cpu_tb_jmp_cache_clear(cpu);
|
||||
atomic_mb_set(&cpu->tb_flushed, true);
|
||||
qatomic_mb_set(&cpu->tb_flushed, true);
|
||||
|
||||
tcg_ctx->tb_ctx.nb_tbs = 0;
|
||||
memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash));
|
||||
|
@ -1146,7 +1146,7 @@ void tb_phys_invalidate(struct uc_struct *uc,
|
|||
uint32_t h;
|
||||
tb_page_addr_t phys_pc;
|
||||
|
||||
atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
|
||||
qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
|
||||
|
||||
/* remove the TB from the hash list */
|
||||
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
|
||||
|
@ -1167,8 +1167,8 @@ void tb_phys_invalidate(struct uc_struct *uc,
|
|||
|
||||
/* remove the TB from the hash list */
|
||||
h = tb_jmp_cache_hash_func(tb->pc);
|
||||
if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
|
||||
atomic_set(&cpu->tb_jmp_cache[h], NULL);
|
||||
if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
|
||||
qatomic_set(&cpu->tb_jmp_cache[h], NULL);
|
||||
}
|
||||
|
||||
/* suppress this TB from the two jump lists */
|
||||
|
@ -1936,7 +1936,7 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
|
|||
unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
|
||||
|
||||
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
|
||||
atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
|
||||
qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2019,7 +2019,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
|
|||
{
|
||||
cpu->interrupt_request |= mask;
|
||||
cpu->tcg_exit_req = 1;
|
||||
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
|
|
@ -191,7 +191,7 @@ static bool tcg_exec_all(struct uc_struct* uc)
|
|||
}
|
||||
|
||||
if (uc->cpu && uc->cpu->exit_request) {
|
||||
atomic_mb_set(&uc->cpu->exit_request, 0);
|
||||
qatomic_mb_set(&uc->cpu->exit_request, 0);
|
||||
}
|
||||
|
||||
return finish;
|
||||
|
|
qemu/exec.c
|
@ -320,7 +320,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
|
|||
hwaddr addr,
|
||||
bool resolve_subpage)
|
||||
{
|
||||
MemoryRegionSection *section = atomic_read(&d->mru_section);
|
||||
MemoryRegionSection *section = qatomic_read(&d->mru_section);
|
||||
subpage_t *subpage;
|
||||
bool update;
|
||||
|
||||
|
@ -336,7 +336,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
|
|||
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
|
||||
}
|
||||
if (update) {
|
||||
atomic_set(&d->mru_section, section);
|
||||
qatomic_set(&d->mru_section, section);
|
||||
}
|
||||
return section;
|
||||
}
|
||||
|
@ -391,7 +391,6 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
|
|||
AddressSpaceDispatch *d = flatview_to_dispatch(fv);
|
||||
|
||||
for (;;) {
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
section = address_space_translate_internal(
|
||||
flatview_to_dispatch(fv), addr, &addr,
|
||||
plen, is_mmio);
|
||||
|
@ -498,8 +497,8 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
|
|||
MemTxAttrs attrs, int *prot)
|
||||
{
|
||||
MemoryRegionSection *section;
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
AddressSpaceDispatch *d = atomic_read(&cpu->cpu_ases[asidx].memory_dispatch);
|
||||
// Unicorn: qatomic_read used instead of qatomic_rcu_read
|
||||
AddressSpaceDispatch *d = qatomic_read(&cpu->cpu_ases[asidx].memory_dispatch);
|
||||
|
||||
section = address_space_translate_internal(d, addr, xlat, plen, false);
|
||||
|
||||
|
@ -1884,8 +1883,8 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
|
|||
{
|
||||
int asidx = cpu_asidx_from_attrs(cpu, attrs);
|
||||
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
|
||||
// Unicorn: uses atomic_read instead of atomic_rcu_read
|
||||
AddressSpaceDispatch *d = atomic_read(&cpuas->memory_dispatch);
|
||||
// Unicorn: uses qatomic_read instead of qatomic_rcu_read
|
||||
AddressSpaceDispatch *d = qatomic_read(&cpuas->memory_dispatch);
|
||||
MemoryRegionSection *sections = d->map.sections;
|
||||
|
||||
return &sections[index & ~TARGET_PAGE_MASK];
|
||||
|
@ -1931,8 +1930,8 @@ static void tcg_commit(MemoryListener *listener)
|
|||
* may have split the RCU critical section.
|
||||
*/
|
||||
d = address_space_to_dispatch(cpuas->as);
|
||||
// Unicorn: atomic_set used instead of atomic_rcu_set
|
||||
atomic_set(&cpuas->memory_dispatch, d);
|
||||
// Unicorn: qatomic_set used instead of qatomic_rcu_set
|
||||
qatomic_set(&cpuas->memory_dispatch, d);
|
||||
tlb_flush(cpuas->cpu);
|
||||
}
|
||||
|
||||
|
@ -2356,7 +2355,7 @@ void *address_space_map(AddressSpace *as,
|
|||
l = len;
|
||||
mr = flatview_translate(fv, addr, &xlat, &l, is_write);
|
||||
if (!memory_access_is_direct(mr, is_write)) {
|
||||
if (atomic_xchg(&as->uc->bounce.in_use, true)) {
|
||||
if (qatomic_xchg(&as->uc->bounce.in_use, true)) {
|
||||
return NULL;
|
||||
}
|
||||
/* Avoid unbounded allocations */
|
||||
|
@ -2406,7 +2405,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
|
|||
qemu_vfree(as->uc->bounce.buffer);
|
||||
as->uc->bounce.buffer = NULL;
|
||||
memory_region_unref(as->uc->bounce.mr);
|
||||
atomic_mb_set(&as->uc->bounce.in_use, false);
|
||||
qatomic_mb_set(&as->uc->bounce.in_use, false);
|
||||
}
|
||||
|
||||
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr,
|
||||
|
|
|
@ -95,7 +95,7 @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
|
|||
#if TCG_OVERSIZED_GUEST
|
||||
return entry->addr_write;
|
||||
#else
|
||||
return atomic_read(&entry->addr_write);
|
||||
return qatomic_read(&entry->addr_write);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -341,10 +341,10 @@ struct TranslationBlock {
|
|||
uintptr_t jmp_list_first;
|
||||
};
|
||||
|
||||
/* Hide the atomic_read to make code a little easier on the eyes */
|
||||
/* Hide the qatomic_read to make code a little easier on the eyes */
|
||||
static inline uint32_t tb_cflags(const TranslationBlock *tb)
|
||||
{
|
||||
return atomic_read(&tb->cflags);
|
||||
return qatomic_read(&tb->cflags);
|
||||
}
|
||||
|
||||
/* current cflags for hashing/comparison */
|
||||
|
|
|
@ -93,8 +93,8 @@ static inline bool cpu_physical_memory_get_dirty(struct uc_struct *uc, ram_addr_
|
|||
// Unicorn: commented out
|
||||
//rcu_read_lock();
|
||||
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
blocks = atomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
// Unicorn: qatomic_read used instead of qatomic_rcu_read
|
||||
blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
|
||||
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
|
||||
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
|
||||
|
@ -138,8 +138,8 @@ static inline bool cpu_physical_memory_all_dirty(struct uc_struct *uc, ram_addr_
|
|||
// Unicorn: commented out
|
||||
//rcu_read_lock();
|
||||
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
blocks = atomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
// Unicorn: qatomic_read used instead of qatomic_rcu_read
|
||||
blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
|
||||
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
|
||||
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
|
||||
|
@ -192,8 +192,8 @@ static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_
|
|||
// Unicorn: commented out
|
||||
//rcu_read_lock();
|
||||
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
blocks = atomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
// Unicorn: qatomic_read used instead of qatomic_rcu_read
|
||||
blocks = qatomic_read(&uc->ram_list.dirty_memory[client]);
|
||||
|
||||
set_bit_atomic(offset, blocks->blocks[idx]);
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
|
|||
|
||||
cpu_get_tb_cpu_state(env, pc, cs_base, flags);
|
||||
hash = tb_jmp_cache_hash_func(*pc);
|
||||
tb = atomic_read(&cpu->tb_jmp_cache[hash]);
|
||||
tb = qatomic_read(&cpu->tb_jmp_cache[hash]);
|
||||
if (likely(tb &&
|
||||
tb->pc == *pc &&
|
||||
tb->cs_base == *cs_base &&
|
||||
|
@ -39,7 +39,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
|
|||
if (tb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
atomic_set(&cpu->tb_jmp_cache[hash], tb);
|
||||
qatomic_set(&cpu->tb_jmp_cache[hash], tb);
|
||||
return tb;
|
||||
}
|
||||
|
||||
|
|
|
@ -133,49 +133,49 @@ void _ReadWriteBarrier(void);
|
|||
* no effect on the generated code but not using the atomic primitives
|
||||
* will get flagged by sanitizers as a violation.
|
||||
*/
|
||||
#define atomic_read__nocheck(ptr) \
|
||||
#define qatomic_read__nocheck(ptr) \
|
||||
__atomic_load_n(ptr, __ATOMIC_RELAXED)
|
||||
|
||||
#define atomic_read(ptr) \
|
||||
#define qatomic_read(ptr) \
|
||||
({ \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
atomic_read__nocheck(ptr); \
|
||||
qatomic_read__nocheck(ptr); \
|
||||
})
|
||||
|
||||
#define atomic_set__nocheck(ptr, i) \
|
||||
#define qatomic_set__nocheck(ptr, i) \
|
||||
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
|
||||
|
||||
#define atomic_set(ptr, i) do { \
|
||||
#define qatomic_set(ptr, i) do { \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
atomic_set__nocheck(ptr, i); \
|
||||
qatomic_set__nocheck(ptr, i); \
|
||||
} while(0)
|
||||
|
||||
/* See above: most compilers currently treat consume and acquire the
|
||||
* same, but this slows down atomic_rcu_read unnecessarily.
|
||||
* same, but this slows down qatomic_rcu_read unnecessarily.
|
||||
*/
|
||||
#ifdef __SANITIZE_THREAD__
|
||||
#define atomic_rcu_read__nocheck(ptr, valptr) \
|
||||
#define qatomic_rcu_read__nocheck(ptr, valptr) \
|
||||
__atomic_load(ptr, valptr, __ATOMIC_CONSUME);
|
||||
#else
|
||||
#define atomic_rcu_read__nocheck(ptr, valptr) \
|
||||
#define qatomic_rcu_read__nocheck(ptr, valptr) \
|
||||
__atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
|
||||
smp_read_barrier_depends();
|
||||
#endif
|
||||
|
||||
#define atomic_rcu_read(ptr) \
|
||||
#define qatomic_rcu_read(ptr) \
|
||||
({ \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
typeof_strip_qual(*ptr) _val; \
|
||||
atomic_rcu_read__nocheck(ptr, &_val); \
|
||||
qatomic_rcu_read__nocheck(ptr, &_val); \
|
||||
_val; \
|
||||
})
|
||||
|
||||
#define atomic_rcu_set(ptr, i) do { \
|
||||
#define qatomic_rcu_set(ptr, i) do { \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
|
||||
} while(0)
|
||||
|
||||
#define atomic_load_acquire(ptr) \
|
||||
#define qatomic_load_acquire(ptr) \
|
||||
({ \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
typeof_strip_qual(*ptr) _val; \
|
||||
|
@ -183,60 +183,60 @@ void _ReadWriteBarrier(void);
|
|||
_val; \
|
||||
})
|
||||
|
||||
#define atomic_store_release(ptr, i) do { \
|
||||
#define qatomic_store_release(ptr, i) do { \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
|
||||
} while(0)
|
||||
|
||||
/* All the remaining operations are fully sequentially consistent */
|
||||
|
||||
#define atomic_xchg__nocheck(ptr, i) ({ \
|
||||
#define qatomic_xchg__nocheck(ptr, i) ({ \
|
||||
__atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \
|
||||
})
|
||||
|
||||
#define atomic_xchg(ptr, i) ({ \
|
||||
#define qatomic_xchg(ptr, i) ({ \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
atomic_xchg__nocheck(ptr, i); \
|
||||
qatomic_xchg__nocheck(ptr, i); \
|
||||
})
|
||||
|
||||
/* Returns the eventual value, failed or not */
|
||||
#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \
|
||||
#define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \
|
||||
typeof_strip_qual(*ptr) _old = (old); \
|
||||
(void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
|
||||
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
|
||||
_old; \
|
||||
})
|
||||
|
||||
#define atomic_cmpxchg(ptr, old, new) ({ \
|
||||
#define qatomic_cmpxchg(ptr, old, new) ({ \
|
||||
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
|
||||
atomic_cmpxchg__nocheck(ptr, old, new); \
|
||||
qatomic_cmpxchg__nocheck(ptr, old, new); \
|
||||
})
|
||||
|
||||
/* Provide shorter names for GCC atomic builtins, return old value */
|
||||
#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
|
||||
|
||||
#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
|
||||
/* And even shorter names that return void. */
|
||||
#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
|
||||
|
||||
#else /* __ATOMIC_RELAXED */
|
||||
|
||||
|
@ -286,7 +286,7 @@ void _ReadWriteBarrier(void);
|
|||
* but it is a full barrier at the hardware level. Add a compiler barrier
|
||||
* to make it a full barrier also at the compiler level.
|
||||
*/
|
||||
#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
|
||||
#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
|
||||
|
||||
#elif defined(_ARCH_PPC)
|
||||
|
||||
|
@ -339,11 +339,11 @@ void _ReadWriteBarrier(void);
|
|||
/* These will only be atomic if the processor does the fetch or store
|
||||
* in a single issue memory operation
|
||||
*/
|
||||
#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
|
||||
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
|
||||
#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
|
||||
#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
|
||||
|
||||
#define atomic_read(ptr) atomic_read__nocheck(ptr)
|
||||
#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i)
|
||||
#define qatomic_read(ptr) qatomic_read__nocheck(ptr)
|
||||
#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)
|
||||
|
||||
/**
|
||||
* atomic_rcu_read - reads a RCU-protected pointer to a local variable
|
||||
|
@ -363,8 +363,8 @@ void _ReadWriteBarrier(void);
|
|||
*
|
||||
* Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
|
||||
*/
|
||||
#define atomic_rcu_read(ptr) ({ \
|
||||
typeof(*ptr) _val = atomic_read(ptr); \
|
||||
#define qatomic_rcu_read(ptr) ({ \
|
||||
typeof(*ptr) _val = qatomic_read(ptr); \
|
||||
smp_read_barrier_depends(); \
|
||||
_val; \
|
||||
})
|
||||
|
@ -380,94 +380,71 @@ void _ReadWriteBarrier(void);
|
|||
*
|
||||
* Should match atomic_rcu_read().
|
||||
*/
|
||||
#define atomic_rcu_set(ptr, i) do { \
|
||||
#define qatomic_rcu_set(ptr, i) do { \
|
||||
smp_wmb(); \
|
||||
atomic_set(ptr, i); \
|
||||
qatomic_set(ptr, i); \
|
||||
} while (0)
|
||||
|
||||
#define atomic_load_acquire(ptr) ({ \
|
||||
typeof(*ptr) _val = atomic_read(ptr); \
|
||||
#define qatomic_load_acquire(ptr) ({ \
|
||||
typeof(*ptr) _val = qatomic_read(ptr); \
|
||||
smp_mb_acquire(); \
|
||||
_val; \
|
||||
})
|
||||
|
||||
#define atomic_store_release(ptr, i) do { \
|
||||
smp_mb_release(); \
|
||||
atomic_set(ptr, i); \
|
||||
#define qatomic_store_release(ptr, i) do { \
|
||||
smp_mb_release(); \
|
||||
qatomic_set(ptr, i); \
|
||||
} while (0)
|
||||
|
||||
#ifndef atomic_xchg
|
||||
#ifndef qatomic_xchg
|
||||
#if defined(__clang__)
|
||||
#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
|
||||
#define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
|
||||
#else
|
||||
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
|
||||
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
|
||||
#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define atomic_xchg__nocheck atomic_xchg
|
||||
#define qatomic_xchg__nocheck atomic_xchg
|
||||
|
||||
/* Provide shorter names for GCC atomic builtins. */
|
||||
#ifdef _MSC_VER
|
||||
// these return the new value (so we make it return the previous value)
|
||||
#define atomic_fetch_inc(ptr) ((InterlockedIncrement(ptr))-1)
|
||||
#define atomic_fetch_dec(ptr) ((InterlockedDecrement(ptr))+1)
|
||||
#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n))-n)
|
||||
#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n))+n)
|
||||
#define atomic_fetch_and(ptr, n) ((InterlockedAnd(ptr, n)))
|
||||
#define atomic_fetch_or(ptr, n) ((InterlockedOr(ptr, n)))
|
||||
#define atomic_fetch_xor(ptr, n) ((InterlockedXor(ptr, n)))
|
||||
|
||||
#define atomic_inc_fetch(ptr) (InterlockedIncrement((long*)(ptr)))
|
||||
#define atomic_dec_fetch(ptr) (InterlockedDecrement((long*)(ptr)))
|
||||
#define atomic_add_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) + n)
|
||||
#define atomic_sub_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) - n)
|
||||
#define atomic_and_fetch(ptr, n) (InterlockedAnd((long*)ptr, n) & n)
|
||||
#define atomic_or_fetch(ptr, n) (InterlockedOr((long*)ptr, n) | n)
|
||||
#define atomic_xor_fetch(ptr, n) (InterlockedXor((long*)ptr, n) ^ n)
|
||||
|
||||
#define atomic_cmpxchg(ptr, old, new) ((InterlockedCompareExchange(ptr, old, new)))
|
||||
#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
|
||||
#else
|
||||
// these return the previous value
|
||||
#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
|
||||
#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
|
||||
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
|
||||
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
|
||||
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
|
||||
#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
|
||||
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
|
||||
#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
|
||||
#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
|
||||
#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
|
||||
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
|
||||
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
|
||||
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
|
||||
#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
|
||||
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
|
||||
#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
|
||||
|
||||
#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
|
||||
#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
|
||||
#endif
|
||||
#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
|
||||
|
||||
#define qatomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
|
||||
#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)
|
||||
|
||||
/* And even shorter names that return void. */
|
||||
#ifdef _MSC_VER
|
||||
#define atomic_inc(ptr) ((void) InterlockedIncrement(ptr))
|
||||
#define atomic_dec(ptr) ((void) InterlockedDecrement(ptr))
|
||||
#define atomic_add(ptr, n) ((void) InterlockedAdd(ptr, n))
|
||||
#define atomic_sub(ptr, n) ((void) InterlockedAdd(ptr, -n))
|
||||
#define atomic_and(ptr, n) ((void) InterlockedAnd(ptr, n))
|
||||
#define atomic_or(ptr, n) ((void) InterlockedOr(ptr, n))
|
||||
#define atomic_xor(ptr, n) ((void) InterlockedXor(ptr, n))
|
||||
#else
|
||||
#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
|
||||
#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
|
||||
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
|
||||
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
|
||||
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
|
||||
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
|
||||
#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
|
||||
#endif
|
||||
#define qatomic_inc(ptr) \
|
||||
((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_dec(ptr) \
|
||||
((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_add(ptr, n) \
|
||||
((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_sub(ptr, n) \
|
||||
((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_and(ptr, n) \
|
||||
((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_or(ptr, n) \
|
||||
((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
|
||||
#define qatomic_xor(ptr, n) \
|
||||
((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
|
||||
|
||||
#endif /* __ATOMIC_RELAXED */
|
||||
|
||||
|
@ -481,11 +458,11 @@ void _ReadWriteBarrier(void);
|
|||
/* This is more efficient than a store plus a fence. */
|
||||
#if !defined(__SANITIZE_THREAD__)
|
||||
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
|
||||
#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
|
||||
#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* atomic_mb_read/set semantics map Java volatile variables. They are
|
||||
/* qatomic_mb_read/set semantics map Java volatile variables. They are
|
||||
* less expensive on some platforms (notably POWER) than fully
|
||||
* sequentially consistent operations.
|
||||
*
|
||||
|
@ -493,14 +470,14 @@ void _ReadWriteBarrier(void);
|
|||
* use. See docs/atomic.txt for more discussion.
|
||||
*/
|
||||
|
||||
#ifndef atomic_mb_read
|
||||
#define atomic_mb_read(ptr) \
|
||||
atomic_load_acquire(ptr)
|
||||
#ifndef qatomic_mb_read
|
||||
#define qatomic_mb_read(ptr) \
|
||||
qatomic_load_acquire(ptr)
|
||||
#endif
|
||||
|
||||
#ifndef atomic_mb_set
|
||||
#define atomic_mb_set(ptr, i) do { \
|
||||
atomic_store_release(ptr, i); \
|
||||
#ifndef qatomic_mb_set
|
||||
#define qatomic_mb_set(ptr, i) do { \
|
||||
qatomic_store_release(ptr, i); \
|
||||
smp_mb(); \
|
||||
} while(0)
|
||||
#endif
|
||||
|
|
|
@ -40,20 +40,20 @@
|
|||
*/
|
||||
|
||||
#if defined(CONFIG_ATOMIC128)
|
||||
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
{
|
||||
return atomic_cmpxchg__nocheck(ptr, cmp, new);
|
||||
return qatomic_cmpxchg__nocheck(ptr, cmp, new);
|
||||
}
|
||||
# define HAVE_CMPXCHG128 1
|
||||
#elif defined(CONFIG_CMPXCHG128)
|
||||
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
{
|
||||
return __sync_val_compare_and_swap_16(ptr, cmp, new);
|
||||
}
|
||||
# define HAVE_CMPXCHG128 1
|
||||
#elif defined(__aarch64__)
|
||||
/* Through gcc 8, aarch64 has no support for 128-bit at all. */
|
||||
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
static inline Int128 qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
{
|
||||
uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
|
||||
uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
|
||||
|
@ -79,26 +79,26 @@ static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
|||
#else
|
||||
/* Fallback definition that must be optimized away, or error. */
|
||||
Int128 QEMU_ERROR("unsupported atomic")
|
||||
atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
|
||||
qatomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
|
||||
# define HAVE_CMPXCHG128 0
|
||||
#endif /* Some definition for HAVE_CMPXCHG128 */
|
||||
|
||||
|
||||
#if defined(CONFIG_ATOMIC128)
|
||||
static inline Int128 atomic16_read(Int128 *ptr)
|
||||
static inline Int128 qatomic16_read(Int128 *ptr)
|
||||
{
|
||||
return atomic_read__nocheck(ptr);
|
||||
return qatomic_read__nocheck(ptr);
|
||||
}
|
||||
|
||||
static inline void atomic16_set(Int128 *ptr, Int128 val)
|
||||
static inline void qatomic16_set(Int128 *ptr, Int128 val)
|
||||
{
|
||||
atomic_set__nocheck(ptr, val);
|
||||
qatomic_set__nocheck(ptr, val);
|
||||
}
|
||||
|
||||
# define HAVE_ATOMIC128 1
|
||||
#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
|
||||
/* We can do better than cmpxchg for AArch64. */
|
||||
static inline Int128 atomic16_read(Int128 *ptr)
|
||||
static inline Int128 qatomic16_read(Int128 *ptr)
|
||||
{
|
||||
uint64_t l, h;
|
||||
uint32_t tmp;
|
||||
|
@ -112,7 +112,7 @@ static inline Int128 atomic16_read(Int128 *ptr)
|
|||
return int128_make128(l, h);
|
||||
}
|
||||
|
||||
static inline void atomic16_set(Int128 *ptr, Int128 val)
|
||||
static inline void qatomic16_set(Int128 *ptr, Int128 val)
|
||||
{
|
||||
uint64_t l = int128_getlo(val), h = int128_gethi(val);
|
||||
uint64_t t1, t2;
|
||||
|
@ -127,26 +127,26 @@ static inline void atomic16_set(Int128 *ptr, Int128 val)
|
|||
|
||||
# define HAVE_ATOMIC128 1
|
||||
#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
|
||||
static inline Int128 atomic16_read(Int128 *ptr)
|
||||
static inline Int128 qatomic16_read(Int128 *ptr)
|
||||
{
|
||||
/* Maybe replace 0 with 0, returning the old value. */
|
||||
return atomic16_cmpxchg(ptr, 0, 0);
|
||||
return qatomic16_cmpxchg(ptr, 0, 0);
|
||||
}
|
||||
|
||||
static inline void atomic16_set(Int128 *ptr, Int128 val)
|
||||
static inline void qatomic16_set(Int128 *ptr, Int128 val)
|
||||
{
|
||||
Int128 old = *ptr, cmp;
|
||||
do {
|
||||
cmp = old;
|
||||
old = atomic16_cmpxchg(ptr, cmp, val);
|
||||
old = qatomic16_cmpxchg(ptr, cmp, val);
|
||||
} while (old != cmp);
|
||||
}
|
||||
|
||||
# define HAVE_ATOMIC128 1
|
||||
#else
|
||||
/* Fallback definitions that must be optimized away, or error. */
|
||||
Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
|
||||
void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
|
||||
Int128 QEMU_ERROR("unsupported atomic") qatomic16_read(Int128 *ptr);
|
||||
void QEMU_ERROR("unsupported atomic") qatomic16_set(Int128 *ptr, Int128 val);
|
||||
# define HAVE_ATOMIC128 0
|
||||
#endif /* Some definition for HAVE_ATOMIC128 */
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
|
|||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = addr + BIT_WORD(nr);
|
||||
|
||||
atomic_or(p, mask);
|
||||
qatomic_or(p, mask);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -429,7 +429,7 @@ static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
|
|||
unsigned int i;
|
||||
|
||||
for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
|
||||
atomic_set(&cpu->tb_jmp_cache[i], NULL);
|
||||
qatomic_set(&cpu->tb_jmp_cache[i], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -334,12 +334,12 @@ static void flatview_destroy(FlatView *view)
|
|||
|
||||
static void flatview_ref(FlatView *view)
|
||||
{
|
||||
atomic_inc(&view->ref);
|
||||
qatomic_inc(&view->ref);
|
||||
}
|
||||
|
||||
static void flatview_unref(FlatView *view)
|
||||
{
|
||||
if (atomic_fetch_dec(&view->ref) == 1) {
|
||||
if (qatomic_fetch_dec(&view->ref) == 1) {
|
||||
flatview_destroy(view);
|
||||
}
|
||||
}
|
||||
|
@ -355,8 +355,8 @@ void unicorn_free_empty_flat_view(struct uc_struct *uc)
|
|||
|
||||
FlatView *address_space_to_flatview(AddressSpace *as)
|
||||
{
|
||||
// Unicorn: atomic_read used instead of atomic_rcu_read
|
||||
return atomic_read(&as->current_map);
|
||||
// Unicorn: qatomic_read used instead of qatomic_rcu_read
|
||||
return qatomic_read(&as->current_map);
|
||||
}
|
||||
|
||||
AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
|
||||
|
@ -900,7 +900,7 @@ static void address_space_set_flatview(AddressSpace *as)
|
|||
}
|
||||
|
||||
/* Writes are protected by the BQL. */
|
||||
atomic_set(&as->current_map, new_view);
|
||||
qatomic_set(&as->current_map, new_view);
|
||||
if (old_view) {
|
||||
flatview_unref(old_view);
|
||||
}
|
||||
|
|
|
@ -87,11 +87,11 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
|
|||
|
||||
void cpu_exit(CPUState *cpu)
|
||||
{
|
||||
atomic_set(&cpu->exit_request, 1);
|
||||
qatomic_set(&cpu->exit_request, 1);
|
||||
/* Ensure cpu_exec will see the exit request after TCG has exited. */
|
||||
smp_wmb();
|
||||
atomic_set(&cpu->tcg_exit_req, 1);
|
||||
atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
|
||||
qatomic_set(&cpu->tcg_exit_req, 1);
|
||||
qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
|
||||
}
|
||||
|
||||
static void cpu_common_noop(CPUState *cpu)
|
||||
|
@ -146,7 +146,7 @@ static void cpu_common_reset(CPUState *cpu)
|
|||
cpu->mem_io_pc = 0;
|
||||
cpu->mem_io_vaddr = 0;
|
||||
cpu->icount_extra = 0;
|
||||
atomic_set(&cpu->icount_decr_ptr->u32, 0);
|
||||
qatomic_set(&cpu->icount_decr_ptr->u32, 0);
|
||||
cpu->can_do_io = 0;
|
||||
cpu->exception_index = -1;
|
||||
cpu->crash_occurred = false;
|
||||
|
|
|
@ -547,7 +547,7 @@ Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char
|
|||
Object *inst;
|
||||
|
||||
for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) {
|
||||
if (atomic_read(&obj->class->object_cast_cache[i]) == typename) {
|
||||
if (qatomic_read(&obj->class->object_cast_cache[i]) == typename) {
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -564,10 +564,10 @@ Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char
|
|||
|
||||
if (obj && obj == inst) {
|
||||
for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
|
||||
atomic_set(&obj->class->object_cast_cache[i - 1],
|
||||
atomic_read(&obj->class->object_cast_cache[i]));
|
||||
qatomic_set(&obj->class->object_cast_cache[i - 1],
|
||||
qatomic_read(&obj->class->object_cast_cache[i]));
|
||||
}
|
||||
atomic_set(&obj->class->object_cast_cache[i - 1], typename);
|
||||
qatomic_set(&obj->class->object_cast_cache[i - 1], typename);
|
||||
}
|
||||
|
||||
out:
|
||||
|
@ -634,7 +634,7 @@ ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass
|
|||
int i;
|
||||
|
||||
for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) {
|
||||
if (atomic_read(&class->class_cast_cache[i]) == typename) {
|
||||
if (qatomic_read(&class->class_cast_cache[i]) == typename) {
|
||||
ret = class;
|
||||
goto out;
|
||||
}
|
||||
|
@ -655,10 +655,10 @@ ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass
|
|||
#ifdef CONFIG_QOM_CAST_DEBUG
|
||||
if (class && ret == class) {
|
||||
for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
|
||||
atomic_set(&class->class_cast_cache[i - 1],
|
||||
atomic_read(&class->class_cast_cache[i]));
|
||||
qatomic_set(&class->class_cast_cache[i - 1],
|
||||
qatomic_read(&class->class_cast_cache[i]));
|
||||
}
|
||||
atomic_set(&class->class_cast_cache[i - 1], typename);
|
||||
qatomic_set(&class->class_cast_cache[i - 1], typename);
|
||||
}
|
||||
out:
|
||||
#endif
|
||||
|
@ -826,7 +826,7 @@ void object_ref(Object *obj)
|
|||
if (!obj) {
|
||||
return;
|
||||
}
|
||||
atomic_inc(&obj->ref);
|
||||
qatomic_inc(&obj->ref);
|
||||
}
|
||||
|
||||
void object_unref(struct uc_struct *uc, Object *obj)
|
||||
|
@ -837,7 +837,7 @@ void object_unref(struct uc_struct *uc, Object *obj)
|
|||
g_assert(obj->ref > 0);
|
||||
|
||||
/* parent always holds a reference to its children */
|
||||
if (atomic_fetch_dec(&obj->ref) == 1) {
|
||||
if (qatomic_fetch_dec(&obj->ref) == 1) {
|
||||
object_finalize(uc, obj);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -314,11 +314,11 @@ static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
|
|||
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
|
||||
{
|
||||
int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
|
||||
uint8_t old = atomic_read(mem);
|
||||
uint8_t old = qatomic_read(mem);
|
||||
|
||||
while (1) {
|
||||
uint8_t new = deposit32(old, ofs, 4, tag);
|
||||
uint8_t cmp = atomic_cmpxchg(mem, old, new);
|
||||
uint8_t cmp = qatomic_cmpxchg(mem, old, new);
|
||||
if (likely(cmp == old)) {
|
||||
return;
|
||||
}
|
||||
|
@ -399,7 +399,7 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
|
|||
2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
|
||||
if (mem1) {
|
||||
tag |= tag << 4;
|
||||
atomic_set(mem1, tag);
|
||||
qatomic_set(mem1, tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -70,7 +70,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
|
|||
uint64_t *haddr = g2h(a0);
|
||||
cmpv = cpu_to_le64(cmpv);
|
||||
newv = cpu_to_le64(newv);
|
||||
oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
oldv = le64_to_cpu(oldv);
|
||||
}
|
||||
#else
|
||||
|
|
|
@ -229,7 +229,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f,
|
|||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
|
||||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
|
||||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ",
|
||||
(target_ulong)atomic_read(&env->mip));
|
||||
(target_ulong)qatomic_read(&env->mip));
|
||||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie);
|
||||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
|
||||
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
|
||||
|
@ -280,7 +280,7 @@ static bool riscv_cpu_has_work(CPUState *cs)
|
|||
* Definition of the WFI instruction requires it to ignore the privilege
|
||||
* mode and delegation registers, but respect individual enables
|
||||
*/
|
||||
return (atomic_read(&env->mip) & env->mie) != 0;
|
||||
return (qatomic_read(&env->mip) & env->mie) != 0;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
|
|
|
@ -148,12 +148,12 @@ struct CPURISCVState {
|
|||
|
||||
/*
|
||||
* CAUTION! Unlike the rest of this struct, mip is accessed asynchonously
|
||||
* by I/O threads. It should be read with atomic_read. It should be updated
|
||||
* by I/O threads. It should be read with qatomic_read. It should be updated
|
||||
* using riscv_cpu_update_mip with the iothread mutex held. The iothread
|
||||
* mutex must be held because mip must be consistent with the CPU inturrept
|
||||
* state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
|
||||
* wuth the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
|
||||
* mip is 32-bits to allow atomic_read on 32-bit hosts.
|
||||
* mip is 32-bits to allow qatomic_read on 32-bit hosts.
|
||||
*/
|
||||
target_ulong mip;
|
||||
|
||||
|
|
|
@ -253,12 +253,12 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
|
|||
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
|
||||
{
|
||||
CPURISCVState *env = &cpu->env;
|
||||
uint32_t old, new, cmp = atomic_read(&env->mip);
|
||||
uint32_t old, new, cmp = qatomic_read(&env->mip);
|
||||
|
||||
do {
|
||||
old = cmp;
|
||||
new = (old & ~mask) | (value & mask);
|
||||
cmp = atomic_cmpxchg(&env->mip, old, new);
|
||||
cmp = qatomic_cmpxchg(&env->mip, old, new);
|
||||
} while (old != cmp);
|
||||
|
||||
if (new) {
|
||||
|
@ -531,7 +531,7 @@ restart:
|
|||
*pte_pa = pte = updated_pte;
|
||||
#else
|
||||
target_ulong old_pte =
|
||||
atomic_cmpxchg(pte_pa, pte, updated_pte);
|
||||
qatomic_cmpxchg(pte_pa, pte, updated_pte);
|
||||
if (old_pte != pte) {
|
||||
goto restart;
|
||||
} else {
|
||||
|
|
|
@ -652,7 +652,7 @@ static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value,
|
|||
old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
|
||||
//qemu_mutex_unlock_iothread();
|
||||
} else {
|
||||
old_mip = atomic_read(&env->mip);
|
||||
old_mip = qatomic_read(&env->mip);
|
||||
}
|
||||
|
||||
if (ret_value) {
|
||||
|
|
|
@ -1317,7 +1317,7 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
|
|||
i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
|
||||
}
|
||||
pair = (uint64_t)i2 << 32 | i1;
|
||||
atomic_set((uint64_t *)jmp_addr, pair);
|
||||
qatomic_set((uint64_t *)jmp_addr, pair);
|
||||
flush_icache_range(jmp_addr, jmp_addr + 8);
|
||||
}
|
||||
|
||||
|
|
|
@ -251,7 +251,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
|
|||
uintptr_t jmp_addr, uintptr_t addr)
|
||||
{
|
||||
/* patch the branch destination */
|
||||
atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
|
||||
qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
|
||||
/* no need to flush icache explicitly */
|
||||
}
|
||||
|
||||
|
|
|
@ -1971,7 +1971,7 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
|
|||
s->nb_ops--;
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
|
||||
qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -3772,15 +3772,15 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
|
|||
QTAILQ_FOREACH(op, &s->ops, link) {
|
||||
n++;
|
||||
}
|
||||
atomic_set(&prof->op_count, prof->op_count + n);
|
||||
qatomic_set(&prof->op_count, prof->op_count + n);
|
||||
if (n > prof->op_count_max) {
|
||||
atomic_set(&prof->op_count_max, n);
|
||||
qatomic_set(&prof->op_count_max, n);
|
||||
}
|
||||
|
||||
n = s->nb_temps;
|
||||
atomic_set(&prof->temp_count, prof->temp_count + n);
|
||||
qatomic_set(&prof->temp_count, prof->temp_count + n);
|
||||
if (n > prof->temp_count_max) {
|
||||
atomic_set(&prof->temp_count_max, n);
|
||||
qatomic_set(&prof->temp_count_max, n);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -3812,7 +3812,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
|
||||
qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
|
||||
#endif
|
||||
|
||||
#ifdef USE_TCG_OPTIMIZATIONS
|
||||
|
@ -3820,8 +3820,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
|
||||
atomic_set(&prof->la_time, prof->la_time - profile_getclock());
|
||||
qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
|
||||
qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
|
||||
#endif
|
||||
|
||||
reachable_code_pass(s);
|
||||
|
@ -3844,7 +3844,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->la_time, prof->la_time + profile_getclock());
|
||||
qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG_DISAS
|
||||
|
@ -3873,7 +3873,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
|
|||
TCGOpcode opc = op->opc;
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
|
||||
qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
|
||||
#endif
|
||||
|
||||
switch (opc) {
|
||||
|
|
|
@ -190,7 +190,7 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
|
|||
|
||||
/* First word */
|
||||
if (nr - bits_to_set > 0) {
|
||||
atomic_or(p, mask_to_set);
|
||||
qatomic_or(p, mask_to_set);
|
||||
nr -= bits_to_set;
|
||||
bits_to_set = BITS_PER_LONG;
|
||||
mask_to_set = ~0UL;
|
||||
|
@ -209,9 +209,9 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
|
|||
/* Last word */
|
||||
if (nr) {
|
||||
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
|
||||
atomic_or(p, mask_to_set);
|
||||
qatomic_or(p, mask_to_set);
|
||||
} else {
|
||||
/* If we avoided the full barrier in atomic_or(), issue a
|
||||
/* If we avoided the full barrier in qatomic_or(), issue a
|
||||
* barrier to account for the assignments in the while loop.
|
||||
*/
|
||||
smp_mb();
|
||||
|
@ -253,7 +253,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
|
|||
|
||||
/* First word */
|
||||
if (nr - bits_to_clear > 0) {
|
||||
old_bits = atomic_fetch_and(p, ~mask_to_clear);
|
||||
old_bits = qatomic_fetch_and(p, ~mask_to_clear);
|
||||
dirty |= old_bits & mask_to_clear;
|
||||
nr -= bits_to_clear;
|
||||
bits_to_clear = BITS_PER_LONG;
|
||||
|
@ -265,7 +265,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
|
|||
if (bits_to_clear == BITS_PER_LONG) {
|
||||
while (nr >= BITS_PER_LONG) {
|
||||
if (*p) {
|
||||
old_bits = atomic_xchg(p, 0);
|
||||
old_bits = qatomic_xchg(p, 0);
|
||||
dirty |= old_bits;
|
||||
}
|
||||
nr -= BITS_PER_LONG;
|
||||
|
@ -276,7 +276,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
|
|||
/* Last word */
|
||||
if (nr) {
|
||||
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
|
||||
old_bits = atomic_fetch_and(p, ~mask_to_clear);
|
||||
old_bits = qatomic_fetch_and(p, ~mask_to_clear);
|
||||
dirty |= old_bits & mask_to_clear;
|
||||
} else {
|
||||
if (!dirty) {
|
||||
|
@ -291,7 +291,7 @@ void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
|
|||
long nr)
|
||||
{
|
||||
while (nr > 0) {
|
||||
*dst = atomic_xchg(src, 0);
|
||||
*dst = qatomic_xchg(src, 0);
|
||||
dst++;
|
||||
src++;
|
||||
nr -= BITS_PER_LONG;
|
||||
|
|