memory: use atomic ops for setting dirty memory bits

Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
can dirty memory without race conditions.

Backports commit d114875b9a1c21162f69a12d72f69a22e7bab376 from qemu
Authored by Stefan Hajnoczi on 2018-02-13 11:06:45 -05:00; committed by Lioncash
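
For context on why the change is needed: a plain read-modify-write such as bitmap[i] |= mask compiles to a separate load and store, so two vCPU threads dirtying pages that share a bitmap word can both read the old value and one thread's bits are silently dropped. The sketch below is not part of the patch; it illustrates the idea behind the atomic helpers using C11 atomics. The names demo_bitmap and demo_set_bit_atomic and the fixed bitmap size are illustrative stand-ins for qemu's set_bit_atomic()/bitmap_set_atomic(), not their actual implementation.

    #include <limits.h>
    #include <stdatomic.h>

    #define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)

    /* Bitmap words are atomic so concurrent setters are well-defined. */
    static _Atomic unsigned long demo_bitmap[1024];

    /* Illustrative atomic bit-set: atomic_fetch_or performs the
     * read-modify-write as one indivisible step, so two threads setting
     * different bits in the same word cannot lose each other's update. */
    static void demo_set_bit_atomic(unsigned long nr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_ULONG);

        atomic_fetch_or(&demo_bitmap[nr / BITS_PER_ULONG], mask);
    }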

@@ -92,7 +92,7 @@ static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_
                                                        unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, uc->ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, uc->ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram_addr_t start,
@@ -100,11 +100,12 @@ static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram
                                                        uint8_t mask)
 {
     unsigned long end, page;
+    unsigned long **d = uc->ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-        bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
 }
 
@@ -131,8 +132,10 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(struct uc_struct *uc,
     for (k = 0; k < nr; k++) {
         if (bitmap[k]) {
             unsigned long temp = leul_to_cpu(bitmap[k]);
+            unsigned long **d = uc->ram_list.dirty_memory;
+
             if (tcg_enabled(uc)) {
-                uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
             }
         }
     }
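
The third hunk applies the same fix to whole-word updates: the old |= on a dirty-bitmap word could race with another writer of the same word. Below is a minimal standalone demonstration of the guarantee, with C11 atomic_fetch_or standing in for qemu's atomic_or wrapper; the pthread harness and the or_bits helper are illustrative, not taken from the patch.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic unsigned long word;

    /* Each thread ORs its own bit pattern into the shared word. */
    static void *or_bits(void *arg)
    {
        atomic_fetch_or(&word, (unsigned long)(uintptr_t)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, or_bits, (void *)0x1);
        pthread_create(&b, NULL, or_bits, (void *)0x2);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* Always prints 0x3: neither OR can overwrite the other. With a
         * plain non-atomic |=, both threads could read 0 and one bit
         * would be lost. */
        printf("word = %#lx\n", atomic_load(&word));
        return 0;
    }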