cpu: atomically modify cpu->exit_request

ThreadSanitizer picks up potential races even though we already use
barriers to ensure things are in the correct order when processing exit
requests. For true C11-defined behaviour across threads we need to use
relaxed atomic_set/atomic_read semantics to reassure tsan.

Backports commit 027d9a7d2911e993cdcbd21c7c35d1dd058f05bb from qemu
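As background, the pattern this patch applies can be shown in a small standalone
C11 program. This is only an illustrative sketch, not QEMU code: the flag names
exit_request/tcg_exit_req are stand-ins for the CPUState fields, and the helpers
request_exit()/exec_loop() are made up for the example. It shows how relaxed
atomic stores/loads remove the C11 data race that tsan reports, while explicit
fences keep the ordering that smp_wmb() provides in the patch.

/* Illustrative sketch only -- not QEMU code.  Two flags mimic
 * cpu->exit_request and cpu->tcg_exit_req.  Plain int stores/loads here
 * would be a C11 data race (what ThreadSanitizer reports); relaxed
 * atomics fix that, and the fences keep the ordering smp_wmb() gave us. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int exit_request;   /* stand-in for cpu->exit_request */
static atomic_int tcg_exit_req;   /* stand-in for cpu->tcg_exit_req */

static void request_exit(void)    /* plays the role of cpu_exit() */
{
    atomic_store_explicit(&exit_request, 1, memory_order_relaxed);
    /* release fence: exit_request becomes visible before tcg_exit_req,
     * the same ordering smp_wmb() enforces in the patch */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&tcg_exit_req, 1, memory_order_relaxed);
}

static void *exec_loop(void *arg) /* plays the role of the TCG execution thread */
{
    (void)arg;
    while (!atomic_load_explicit(&tcg_exit_req, memory_order_relaxed)) {
        /* translated blocks would run here */
    }
    atomic_thread_fence(memory_order_acquire);  /* pairs with the release fence */
    atomic_store_explicit(&tcg_exit_req, 0, memory_order_relaxed);
    printf("exit_request = %d\n",               /* guaranteed to print 1 */
           atomic_load_explicit(&exit_request, memory_order_relaxed));
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, exec_loop, NULL);
    request_exit();
    pthread_join(t, NULL);
    return 0;
}

Built with -std=c11 -pthread and run under -fsanitize=thread, this sketch should
report no races, which is the same effect the relaxed atomic_set calls below have
on the real flags.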
Alex Bennée 2018-02-26 05:11:14 -05:00 committed by Lioncash
parent e1cf9ca84a
commit d4cb954102
2 changed files with 3 additions and 3 deletions


@@ -78,7 +78,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
         /* We were asked to stop executing TBs (probably a pending
          * interrupt. We've now stopped, so clear the flag.
          */
-        cpu->tcg_exit_req = 0;
+        atomic_set(&cpu->tcg_exit_req, 0);
     }
     return ret;
 }


@@ -108,10 +108,10 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
 
 void cpu_exit(CPUState *cpu)
 {
-    cpu->exit_request = 1;
+    atomic_set(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited. */
     smp_wmb();
-    cpu->tcg_exit_req = 1;
+    atomic_set(&cpu->tcg_exit_req, 1);
 }
 
 static void cpu_common_noop(CPUState *cpu)