Mirror of https://github.com/yuzu-emu/unicorn.git
tcg: Add MO_ALIGN, MO_UNALN
These modifiers control, on a per-memory-op basis, whether unaligned memory accesses are allowed. The default setting reflects the target's definition of ALIGNED_ONLY.

Backports commit dfb36305626636e2e07e0c5acd3a002a5419399e from qemu.
This commit is contained in:
parent ac713c7034
commit 336833c11e
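The whole mechanism reduces to one mask-and-compare in the slow paths below. A minimal, self-contained sketch of that predicate — DATA_SIZE, the MO_* values, and the oi packing are stand-ins for the real definitions in softmmu_template.h and tcg/tcg.h; the (memop << 4) | mmu_idx encoding follows upstream QEMU of this era:

#include <assert.h>
#include <stdint.h>

enum { MO_AMASK = 16, MO_ALIGN = MO_AMASK, MO_UNALN = 0 }; /* non-ALIGNED_ONLY defaults */
#define DATA_SIZE 4                              /* e.g. a 32-bit access */

typedef uint32_t TCGMemOpIdx;
static int get_memop(TCGMemOpIdx oi) { return oi >> 4; } /* oi = (memop << 4) | mmu_idx */

/* The test this commit adds: fault only when the address is misaligned
   AND the memory op asked for alignment checking. */
static int must_fault(uint64_t addr, TCGMemOpIdx oi)
{
    return (addr & (DATA_SIZE - 1)) != 0
           && (get_memop(oi) & MO_AMASK) == MO_ALIGN;
}

int main(void)
{
    TCGMemOpIdx aligned_op   = (MO_ALIGN << 4) | 1; /* memop | MO_ALIGN, mmu_idx 1 */
    TCGMemOpIdx unaligned_op = (MO_UNALN << 4) | 1; /* memop | MO_UNALN, mmu_idx 1 */
    assert(must_fault(0x1002, aligned_op));    /* misaligned + MO_ALIGN -> faults  */
    assert(!must_fault(0x1002, unaligned_op)); /* misaligned + MO_UNALN -> allowed */
    assert(!must_fault(0x1000, aligned_op));   /* aligned -> never faults          */
    return 0;
}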
qemu/softmmu_template.h:

@@ -292,8 +292,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -305,7 +305,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
             cpu_exit(uc->current_cpu);
             return 0;
         }
-#endif
+
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -345,7 +345,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -356,7 +356,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 #endif
         cpu_exit(uc->current_cpu);
         return 0;
-#endif
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -371,8 +371,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }

     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -384,7 +384,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         cpu_exit(uc->current_cpu);
         return 0;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
@@ -521,8 +520,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -534,7 +533,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
             cpu_exit(uc->current_cpu);
             return 0;
         }
-#endif
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -573,7 +571,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -584,7 +582,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 #endif
         cpu_exit(uc->current_cpu);
         return 0;
-#endif
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -599,8 +597,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }

     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
         //                     mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -612,7 +610,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         cpu_exit(uc->current_cpu);
         return 0;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
@@ -750,8 +747,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
             //                     mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -759,7 +756,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_exit(uc->current_cpu);
             return;
         }
-#endif
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -794,14 +790,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
         env->invalid_error = UC_ERR_WRITE_UNALIGNED;
         cpu_exit(uc->current_cpu);
         return;
-#endif
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache. */
@@ -819,8 +815,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }

     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -828,7 +824,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         cpu_exit(uc->current_cpu);
         return;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
@@ -910,8 +905,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
             env->invalid_addr = addr;
@@ -919,7 +914,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_exit(uc->current_cpu);
             return;
         }
-#endif
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -954,14 +948,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
         env->invalid_error = UC_ERR_WRITE_UNALIGNED;
         cpu_exit(uc->current_cpu);
         return;
-#endif
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache. */
@@ -979,8 +973,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }

     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
         env->invalid_addr = addr;
@@ -988,7 +982,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         cpu_exit(uc->current_cpu);
         return;
     }
-#endif

     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
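Front ends opt in when they build the memory op. A hypothetical translator snippet in the style of upstream QEMU of this period (unicorn's fork routes TCG calls through its own context, so the exact signature there may differ):

/* 32-bit little-endian guest load that must be naturally aligned:
   a misaligned address takes the slow path above, which on unicorn
   records env->invalid_addr / env->invalid_error and exits the CPU. */
tcg_gen_qemu_ld_i32(val, addr, mmu_idx, MO_LEUL | MO_ALIGN);

/* The same load with alignment checking explicitly off. */
tcg_gen_qemu_ld_i32(val, addr, mmu_idx, MO_LEUL | MO_UNALN);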
qemu/tcg/tcg.h:

@@ -244,6 +244,19 @@ typedef enum TCGMemOp {
     MO_TE = MO_LE,
 #endif

+    /* MO_UNALN accesses are never checked for alignment.
+       MO_ALIGN accesses will result in a call to the CPU's
+       do_unaligned_access hook if the guest address is not aligned.
+       The default depends on whether the target CPU defines ALIGNED_ONLY.  */
+    MO_AMASK = 16,
+#ifdef ALIGNED_ONLY
+    MO_ALIGN = 0,
+    MO_UNALN = MO_AMASK,
+#else
+    MO_ALIGN = MO_AMASK,
+    MO_UNALN = 0,
+#endif
+
     /* Combinations of the above, for ease of use.  */
     MO_UB = MO_8,
     MO_UW = MO_16,
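Note that exactly one of MO_ALIGN/MO_UNALN is zero, so a memop that sets neither flag silently inherits the target's historical behavior. A self-contained truth-table check (the values mirror the enum above; nothing unicorn-specific is assumed):

#include <stdio.h>

enum { MO_AMASK = 16 };

/* Exactly the predicate used by the softmmu helpers above. */
static int checked(int memop, int mo_align)
{
    return (memop & MO_AMASK) == mo_align;
}

static void report(const char *config, int mo_align, int mo_unaln)
{
    printf("%-13s plain:%d  |MO_ALIGN:%d  |MO_UNALN:%d\n", config,
           checked(0, mo_align),            /* neither flag set     */
           checked(mo_align, mo_align),     /* explicitly aligned   */
           checked(mo_unaln, mo_align));    /* explicitly unaligned */
}

int main(void)
{
    /* ALIGNED_ONLY targets: MO_ALIGN == 0, so plain ops are checked.  */
    report("ALIGNED_ONLY", 0, MO_AMASK);
    /* All other targets: MO_ALIGN == MO_AMASK, so checking is opt-in. */
    report("default", MO_AMASK, 0);
    return 0;  /* prints plain:1 for ALIGNED_ONLY, plain:0 otherwise */
}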