From 336833c11eef45bfe4e4641946f05abbc5a76d05 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sat, 10 Feb 2018 20:05:58 -0500
Subject: [PATCH] tcg: Add MO_ALIGN, MO_UNALN

These modifiers control, on a per-memory-op basis, whether unaligned
memory accesses are allowed. The default setting reflects the target's
definition of ALIGNED_ONLY.

Backports commit dfb36305626636e2e07e0c5acd3a002a5419399e from qemu
---
 qemu/softmmu_template.h | 109 +++++++++++++++++++---------------------
 qemu/tcg/tcg.h          |  13 +++++
 2 files changed, 64 insertions(+), 58 deletions(-)

diff --git a/qemu/softmmu_template.h b/qemu/softmmu_template.h
index ca28f52d..adecbfb3 100644
--- a/qemu/softmmu_template.h
+++ b/qemu/softmmu_template.h
@@ -292,8 +292,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -305,7 +305,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
             cpu_exit(uc->current_cpu);
             return 0;
         }
-#endif
+
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -345,18 +345,18 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            //                     mmu_idx, retaddr);
+            env->invalid_addr = addr;
 #if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
 #else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
+            env->invalid_error = UC_ERR_READ_UNALIGNED;
 #endif
+            cpu_exit(uc->current_cpu);
+            return 0;
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -371,8 +371,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }

     /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -384,7 +384,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         cpu_exit(uc->current_cpu);
         return 0;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
@@ -521,8 +520,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -534,7 +533,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
             cpu_exit(uc->current_cpu);
             return 0;
         }
-#endif
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -573,18 +571,18 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            //                     mmu_idx, retaddr);
+            env->invalid_addr = addr;
 #if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
 #else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
+            env->invalid_error = UC_ERR_READ_UNALIGNED;
 #endif
+            cpu_exit(uc->current_cpu);
+            return 0;
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -599,8 +597,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }

     /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -612,7 +610,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         cpu_exit(uc->current_cpu);
         return 0;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
@@ -750,8 +747,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -759,7 +756,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_exit(uc->current_cpu);
             return;
         }
-#endif
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -794,14 +790,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-#endif
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+            cpu_exit(uc->current_cpu);
+            return;
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache. */
@@ -819,8 +815,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }

     /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -828,7 +824,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         cpu_exit(uc->current_cpu);
         return;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
@@ -910,8 +905,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -919,7 +914,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_exit(uc->current_cpu);
             return;
         }
-#endif
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -954,14 +948,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-#endif
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+            cpu_exit(uc->current_cpu);
+            return;
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache. */
@@ -979,8 +973,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }

     /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -988,7 +982,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         cpu_exit(uc->current_cpu);
         return;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index dd9d0e73..d86bb4f6 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -244,6 +244,19 @@ typedef enum TCGMemOp {
     MO_TE = MO_LE,
 #endif

+    /* MO_UNALN accesses are never checked for alignment.
+       MO_ALIGN accesses will result in a call to the CPU's
+       do_unaligned_access hook if the guest address is not aligned.
+       The default depends on whether the target CPU defines ALIGNED_ONLY. */
+    MO_AMASK = 16,
+#ifdef ALIGNED_ONLY
+    MO_ALIGN = 0,
+    MO_UNALN = MO_AMASK,
+#else
+    MO_ALIGN = MO_AMASK,
+    MO_UNALN = 0,
+#endif
+
     /* Combinations of the above, for ease of use. */
     MO_UB = MO_8,
     MO_UW = MO_16,
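A note on the polarity trick in the tcg.h hunk, since it is easy to misread:
below is a minimal standalone sketch of how the new bits behave. It is not
part of the patch; the file name, the enum subset and the helper
alignment_enforced() are invented for illustration, with the values taken
from the hunk above.

/* mo_align_demo.c: illustrative only.  Build with "cc mo_align_demo.c" for a
 * target without ALIGNED_ONLY, or "cc -DALIGNED_ONLY mo_align_demo.c" for an
 * aligned-only target. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
    MO_AMASK = 16,          /* the new alignment bit */
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,           /* aligned is the default: bit clear */
    MO_UNALN = MO_AMASK,    /* set the bit to relax the check */
#else
    MO_ALIGN = MO_AMASK,    /* set the bit to request the check */
    MO_UNALN = 0,           /* unaligned is the default: bit clear */
#endif
};

/* The single runtime test the patch substitutes for "#ifdef ALIGNED_ONLY"
 * in the softmmu helpers. */
static bool alignment_enforced(int memop)
{
    return (memop & MO_AMASK) == MO_ALIGN;
}

int main(void)
{
    const uint64_t addr = 0x1003;  /* hypothetical guest address */
    const int data_size = 4;       /* DATA_SIZE of a 32-bit access */
    const bool misaligned = (addr & (data_size - 1)) != 0;

    /* A memop with neither flag spelled out inherits the target policy;
     * an explicit MO_ALIGN or MO_UNALN overrides it either way. */
    printf("default op faults:  %d\n", misaligned && alignment_enforced(0));
    printf("MO_ALIGN op faults: %d\n", misaligned && alignment_enforced(MO_ALIGN));
    printf("MO_UNALN op faults: %d\n", misaligned && alignment_enforced(MO_UNALN));
    return 0;
}

Because MO_ALIGN names whichever value of the MO_AMASK bit means "alignment
required" on the target being built, the one masked comparison covers both the
ALIGNED_ONLY and the default configuration; that is what lets the helpers drop
their compile-time #ifdef blocks in favour of a per-access runtime test.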
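A usage sketch (hypothetical, not taken from this patch): a front end that
wants a 32-bit little-endian guest load to trap on a misaligned address would
OR the flag into the memop it already passes, e.g.

    tcg_gen_qemu_ld_i32(val, addr, mem_index, MO_LEUL | MO_ALIGN);

where val, addr and mem_index are whatever the front end has at hand; leaving
the flag unset keeps the target's ALIGNED_ONLY-derived default behaviour.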