target/riscv: raise exception to HS-mode at get_physical_address

VS-stage translation at get_physical_address needs to translate the PTE
address via G-stage translation, but a G-stage translation error cannot
be distinguished from a VS-stage translation error in
riscv_cpu_tlb_fill. On migration, the destination needs to rebuild the
PTE, and this G-stage translation error must be handled by HS-mode.
So introduce TRANSLATE_G_STAGE_FAIL so that riscv_cpu_tlb_fill can
distinguish it and raise the resulting exception to HS-mode.

Backports 33a9a57d2c31ec9ed68858911dc490b5de15f342
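
For reference, a minimal self-contained sketch of how the new result code is
meant to be consumed. The enum values and the ">> 2 to match htval" convention
mirror the diff below; fake_walk, the constants, and the printf routing are
purely illustrative stand-ins, not QEMU code:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum {
        TRANSLATE_SUCCESS,
        TRANSLATE_FAIL,
        TRANSLATE_PMP_FAIL,
        TRANSLATE_G_STAGE_FAIL
    };

    /* Stand-in for the VS-stage PTE walk: pretend the G-stage translation
     * of the PTE address failed and report the faulting guest-physical
     * address, pre-shifted right by 2 as htval expects. */
    static int fake_walk(uint64_t base, uint64_t idx, uint64_t ptesize,
                         uint64_t *fault_pte_addr)
    {
        if (fault_pte_addr) {
            *fault_pte_addr = (base + idx * ptesize) >> 2;
        }
        return TRANSLATE_G_STAGE_FAIL;
    }

    int main(void)
    {
        uint64_t guest_phys_fault_addr = 0;
        bool first_stage_error = true;

        int ret = fake_walk(0x80000000ULL, 3, 8, &guest_phys_fault_addr);

        if (ret == TRANSLATE_G_STAGE_FAIL) {
            /* The fault happened in the G-stage, so it is reported to
             * HS-mode as a guest-page fault rather than a VS-stage fault. */
            first_stage_error = false;
            printf("guest page fault, htval = 0x%" PRIx64 "\n",
                   guest_phys_fault_addr);
        }
        return first_stage_error ? 1 : 0;
    }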
Yifei Jiang authored 2021-03-08 14:42:50 -05:00, committed by Lioncash
parent d2cea344f0, commit 281d851303
2 changed files with 34 additions and 12 deletions

target/riscv/cpu.h

@@ -86,9 +86,13 @@ enum {
 #define VEXT_VERSION_0_07_1 0x00000701
 
-#define TRANSLATE_PMP_FAIL 2
-#define TRANSLATE_FAIL 1
-#define TRANSLATE_SUCCESS 0
+enum {
+    TRANSLATE_SUCCESS,
+    TRANSLATE_FAIL,
+    TRANSLATE_PMP_FAIL,
+    TRANSLATE_G_STAGE_FAIL
+};
+
 #define MMU_USER_IDX 3
 
 #define MAX_RISCV_PMPS (16)

target/riscv/cpu_helper.c

@@ -310,6 +310,9 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
  * @physical: This will be set to the calculated physical address
  * @prot: The returned protection attributes
  * @addr: The virtual address to be translated
+ * @fault_pte_addr: If not NULL, this will be set to fault pte address
+ *                  when a error occurs on pte address translation.
+ *                  This will already be shifted to match htval.
  * @access_type: The type of MMU access
  * @mmu_idx: Indicates current privilege level
  * @first_stage: Are we in first stage translation?
@@ -318,6 +321,7 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
  */
 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                 int *prot, target_ulong addr,
+                                target_ulong *fault_pte_addr,
                                 int access_type, int mmu_idx,
                                 bool first_stage, bool two_stage)
 {
@@ -441,11 +445,14 @@ restart:
 
             /* Do the second stage translation on the base PTE address. */
             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
-                                                 base, MMU_DATA_LOAD,
+                                                 base, NULL, MMU_DATA_LOAD,
                                                  mmu_idx, false, true);
 
             if (vbase_ret != TRANSLATE_SUCCESS) {
-                return vbase_ret;
+                if (fault_pte_addr) {
+                    *fault_pte_addr = (base + idx * ptesize) >> 2;
+                }
+                return TRANSLATE_G_STAGE_FAIL;
             }
 
             pte_addr = vbase + idx * ptesize;
@@ -626,13 +633,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     int prot;
     int mmu_idx = cpu_mmu_index(&cpu->env, false);
 
-    if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
+    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                              true, riscv_cpu_virt_enabled(env))) {
         return -1;
     }
 
     if (riscv_cpu_virt_enabled(env)) {
-        if (get_physical_address(env, &phys_addr, &prot, phys_addr,
+        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                  0, mmu_idx, false, true)) {
             return -1;
         }
@@ -718,19 +725,30 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     if (riscv_cpu_virt_enabled(env) ||
         (riscv_cpu_two_stage_lookup(env) && access_type != MMU_INST_FETCH)) {
         /* Two stage lookup */
-        ret = get_physical_address(env, &pa, &prot, address, access_type,
+        ret = get_physical_address(env, &pa, &prot, address,
+                                   &env->guest_phys_fault_addr, access_type,
                                    mmu_idx, true, true);
 
+        /*
+         * A G-stage exception may be triggered during two state lookup.
+         * And the env->guest_phys_fault_addr has already been set in
+         * get_physical_address().
+         */
+        if (ret == TRANSLATE_G_STAGE_FAIL) {
+            first_stage_error = false;
+            access_type = MMU_DATA_LOAD;
+        }
+
         qemu_log_mask(CPU_LOG_MMU,
                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                       TARGET_FMT_plx " prot %d\n",
                       __func__, address, ret, pa, prot);
 
-        if (ret != TRANSLATE_FAIL) {
+        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
             im_address = pa;
 
-            ret = get_physical_address(env, &pa, &prot2, im_address,
+            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                        access_type, mmu_idx, false, true);
 
             qemu_log_mask(CPU_LOG_MMU,
@@ -759,8 +777,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         }
     } else {
         /* Single stage lookup */
-        ret = get_physical_address(env, &pa, &prot, address, access_type,
-                                   mmu_idx, true, false);
+        ret = get_physical_address(env, &pa, &prot, address, NULL,
+                                   access_type, mmu_idx, true, false);
 
         qemu_log_mask(CPU_LOG_MMU,
                       "%s address=%" VADDR_PRIx " ret %d physical "