mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2025-01-03 16:35:46 +00:00
tcg: remove global exit_request
There are now only two uses of the global exit_request left. The first ensures we exit the run_loop when we first start to process pending work and in the kick handler. This is just as easily done by setting the first_cpu->exit_request flag. The second use is in the round robin kick routine. The global exit_request ensured every vCPU would set its local exit_request and cause a full exit of the loop. Now that the iothread isn't being held while running, we can just rely on the kick handler to push us out as intended. We lightly re-factor the main vCPU thread to ensure cpu->exit_requests cause us to exit the main loop and process any IO requests that might come along. As a cpu->exit_request may legitimately get squashed while processing the EXCP_INTERRUPT exception, we also check cpu->queued_work_first to ensure queued work is expedited as soon as possible. Backports commit e5143e30fb87fbf179029387f83f98a5a9b27f19 from qemu
This commit is contained in:
parent
4d90497d14
commit
632b853761
|
@ -193,7 +193,6 @@ struct uc_struct {
|
||||||
|
|
||||||
// qemu/cpu-exec.c
|
// qemu/cpu-exec.c
|
||||||
BounceBuffer bounce;
|
BounceBuffer bounce;
|
||||||
volatile sig_atomic_t exit_request;
|
|
||||||
CPUState *tcg_current_rr_cpu;
|
CPUState *tcg_current_rr_cpu;
|
||||||
|
|
||||||
// qemu/memory.c
|
// qemu/memory.c
|
||||||
|
|
|
@ -400,15 +400,13 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||||
*tb_exit = ret & TB_EXIT_MASK;
|
*tb_exit = ret & TB_EXIT_MASK;
|
||||||
switch (*tb_exit) {
|
switch (*tb_exit) {
|
||||||
case TB_EXIT_REQUESTED:
|
case TB_EXIT_REQUESTED:
|
||||||
/* Something asked us to stop executing
|
/* Something asked us to stop executing chained TBs; just
|
||||||
* chained TBs; just continue round the main
|
* continue round the main loop. Whatever requested the exit
|
||||||
* loop. Whatever requested the exit will also
|
* will also have set something else (eg interrupt_request)
|
||||||
* have set something else (eg exit_request or
|
* which we will handle next time around the loop. But we
|
||||||
* interrupt_request) which we will handle
|
* need to ensure the tcg_exit_req read in generated code
|
||||||
* next time around the loop. But we need to
|
* comes before the next read of cpu->exit_request or
|
||||||
* ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
|
* cpu->interrupt_request.
|
||||||
* comes before the next read of cpu->exit_request
|
|
||||||
* or cpu->interrupt_request.
|
|
||||||
*/
|
*/
|
||||||
smp_mb();
|
smp_mb();
|
||||||
*last_tb = NULL;
|
*last_tb = NULL;
|
||||||
|
@ -494,10 +492,6 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
|
||||||
atomic_mb_set(&uc->current_cpu, cpu);
|
atomic_mb_set(&uc->current_cpu, cpu);
|
||||||
atomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
|
atomic_mb_set(&uc->tcg_current_rr_cpu, cpu);
|
||||||
|
|
||||||
if (unlikely(atomic_mb_read(&uc->exit_request))) {
|
|
||||||
cpu->exit_request = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
cc->cpu_exec_enter(cpu);
|
cc->cpu_exec_enter(cpu);
|
||||||
cpu->exception_index = -1;
|
cpu->exception_index = -1;
|
||||||
env->invalid_error = UC_ERR_OK;
|
env->invalid_error = UC_ERR_OK;
|
||||||
|
|
13
qemu/cpus.c
13
qemu/cpus.c
|
@ -150,7 +150,7 @@ static bool tcg_exec_all(struct uc_struct* uc)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
bool finish = false;
|
bool finish = false;
|
||||||
while (!uc->exit_request) {
|
while (!uc->cpu->exit_request) {
|
||||||
CPUState *cpu = uc->cpu;
|
CPUState *cpu = uc->cpu;
|
||||||
CPUArchState *env = cpu->env_ptr;
|
CPUArchState *env = cpu->env_ptr;
|
||||||
|
|
||||||
|
@ -165,38 +165,37 @@ static bool tcg_exec_all(struct uc_struct* uc)
|
||||||
// reset stop_request
|
// reset stop_request
|
||||||
uc->stop_request = false;
|
uc->stop_request = false;
|
||||||
} else if (uc->stop_request) {
|
} else if (uc->stop_request) {
|
||||||
//printf(">>> got STOP request!!!\n");
|
|
||||||
finish = true;
|
finish = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// save invalid memory access error & quit
|
// save invalid memory access error & quit
|
||||||
if (env->invalid_error) {
|
if (env->invalid_error) {
|
||||||
// printf(">>> invalid memory accessed, STOP = %u!!!\n", env->invalid_error);
|
|
||||||
uc->invalid_addr = env->invalid_addr;
|
uc->invalid_addr = env->invalid_addr;
|
||||||
uc->invalid_error = env->invalid_error;
|
uc->invalid_error = env->invalid_error;
|
||||||
finish = true;
|
finish = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// printf(">>> stop with r = %x, HLT=%x\n", r, EXCP_HLT);
|
|
||||||
if (r == EXCP_DEBUG) {
|
if (r == EXCP_DEBUG) {
|
||||||
cpu_handle_guest_debug(cpu);
|
cpu_handle_guest_debug(cpu);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (r == EXCP_HLT) {
|
if (r == EXCP_HLT) {
|
||||||
//printf(">>> got HLT!!!\n");
|
|
||||||
finish = true;
|
finish = true;
|
||||||
break;
|
break;
|
||||||
} else if (r == EXCP_ATOMIC) {
|
} else if (r == EXCP_ATOMIC) {
|
||||||
cpu_exec_step_atomic(uc, cpu);
|
cpu_exec_step_atomic(uc, cpu);
|
||||||
}
|
}
|
||||||
} else if (cpu->stop || cpu->stopped) {
|
} else if (cpu->stop || cpu->stopped) {
|
||||||
printf(">>> got stopped!!!\n");
|
printf(">>> got stopped!!!\n");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
uc->exit_request = 0;
|
|
||||||
|
if (uc->cpu && uc->cpu->exit_request) {
|
||||||
|
atomic_mb_set(&uc->cpu->exit_request, 0);
|
||||||
|
}
|
||||||
|
|
||||||
return finish;
|
return finish;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue