tcg: Improve the alignment check infrastructure

Some architectures (e.g. ARMv8) require an address to be aligned to a
size larger than the size of the memory access itself. QEMU's existing
low-cost alignment check is sufficient to implement such a check, but we
need a way to specify the alignment size independently of the access size.

Backports commit 1f00b27f17518a1bcb4cedca49eaec96a4d560bd from qemu
Authored by Sergey Sorokin on 2018-02-25 01:51:06 -05:00; committed by Lioncash
parent 23586e2674
commit e4d123caa9
9 changed files with 182 additions and 184 deletions
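
In short: a memory operation now carries its alignment requirement in the
MO_AMASK field of TCGMemOp, independently of (and possibly larger than) the
access size, and get_alignment_bits() recovers it. A minimal sketch, assuming
the usual tcg.h and assert.h includes (not code from the patch itself):

    /* A 4-byte little-endian load under three different alignment policies. */
    static void example_memops(void)
    {
        TCGMemOp op_natural = MO_TEUL | MO_ALIGN;     /* natural (4-byte) alignment */
        TCGMemOp op_over    = MO_TEUL | MO_ALIGN_16;  /* over-aligned: 16-byte boundary */
        TCGMemOp op_unaln   = MO_TEUL | MO_UNALN;     /* no alignment check */

        /* get_alignment_bits() (added to tcg.h below) returns log2 of the
         * required alignment, or a negative value when no check is wanted. */
        assert(get_alignment_bits(op_natural) == 2);
        assert(get_alignment_bits(op_over)    == 4);
        assert(get_alignment_bits(op_unaln)   == -2);
    }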

View file

@@ -317,14 +317,22 @@ CPUArchState *cpu_copy(CPUArchState *env);
 /* memory API */
 
 /* Flags stored in the low bits of the TLB virtual address. These are
-   defined so that fast path ram access is all zeros. */
+ * defined so that fast path ram access is all zeros.
+ * The flags all must be between TARGET_PAGE_BITS and
+ * maximum address alignment bit.
+ */
 /* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK   (1 << 3)
+#define TLB_INVALID_MASK   (1 << (TARGET_PAGE_BITS - 1))
 /* Set if TLB entry references a clean RAM page. The iotlb entry will
    contain the page physical address. */
-#define TLB_NOTDIRTY    (1 << 4)
+#define TLB_NOTDIRTY    (1 << (TARGET_PAGE_BITS - 2))
 /* Set if TLB entry is an IO callback. */
-#define TLB_MMIO        (1 << 5)
+#define TLB_MMIO        (1 << (TARGET_PAGE_BITS - 3))
+/* Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
 
 ram_addr_t last_ram_offset(struct uc_struct *uc);
 void qemu_mutex_lock_ramlist(struct uc_struct *uc);
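
A worked example of the new flag layout, assuming a 4 KiB target page
(TARGET_PAGE_BITS == 12); the QEMU_BUILD_BUG_ON line is only an illustrative
compile-time restatement of the constraint, not part of the patch:

    /* The flags now sit in the top sub-page bits:
     *   TLB_INVALID_MASK = 1 << 11
     *   TLB_NOTDIRTY     = 1 << 10
     *   TLB_MMIO         = 1 << 9
     * so TLB_FLAGS_MASK == 0xe00, and an alignment mask (1 << a_bits) - 1
     * may freely use bits 0..8.  get_alignment_bits() asserts exactly this
     * non-overlap under CONFIG_SOFTMMU.
     */
    QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ((1 << (TARGET_PAGE_BITS - 3)) - 1));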

View file

@@ -186,6 +186,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
     int error_code;
@@ -288,23 +289,14 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
+
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -344,18 +336,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -369,21 +349,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         goto _out;
     }
 
-    /* Handle aligned access or unaligned access in the same page. */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-    }
-
     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
     res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
@@ -411,6 +376,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
     int error_code;
@@ -513,22 +479,14 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
+
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
         if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -567,18 +525,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -592,21 +538,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         goto _out;
     }
 
-    /* Handle aligned access or unaligned access in the same page. */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-    }
-
     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
@@ -672,6 +603,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     struct hook *hook;
     bool handled;
@@ -734,18 +666,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -780,14 +708,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache. */
@@ -804,17 +724,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page. */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-    }
-
     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
 #if DATA_SIZE == 1
     glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
@@ -830,6 +739,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     struct hook *hook;
     bool handled;
@@ -892,18 +802,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
         if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -938,14 +844,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
@@ -962,17 +860,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page. */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-    }
-
     haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
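
The net effect on the slow path: one up-front test replaces the three
duplicated MO_ALIGN/UC_ERR_*_UNALIGNED blocks per helper (TLB-miss,
page-crossing and same-page branches), and it also honours alignments larger
than the access size. A condensed sketch, with alignment_ok() as an
illustrative stand-in for the test added at the top of each helper:

    static bool alignment_ok(target_ulong addr, TCGMemOp memop)
    {
        int a_bits = get_alignment_bits(memop);
        /* Byte accesses (a_bits == 0) and MO_UNALN (a_bits < 0) never fault. */
        return a_bits <= 0 || (addr & ((1 << a_bits) - 1)) == 0;
    }

    /* alignment_ok(0x1001, MO_UB   | MO_ALIGN)    -> true  (byte access, a_bits == 0)
     * alignment_ok(0x1001, MO_TEUW | MO_UNALN)    -> true  (unaligned allowed, a_bits == -1)
     * alignment_ok(0x1002, MO_TEUL | MO_ALIGN)    -> false (4-byte alignment, a_bits == 2)
     * alignment_ok(0x1008, MO_TEQ  | MO_ALIGN_16) -> false (16-byte alignment, a_bits == 4)
     */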

View file

@@ -1085,19 +1085,20 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
     int tlb_offset = is_read ?
         offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int a_bits = get_alignment_bits(opc);
     TCGReg base = TCG_AREG0, x3;
     uint64_t tlb_mask;
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address. For unaligned access, we check that we don't
        cross pages using the address of the last byte of the access. */
-    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
-        tlb_mask = TARGET_PAGE_MASK | s_mask;
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
         x3 = addr_reg;
     } else {
         tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
-                     TCG_REG_X3, addr_reg, s_mask);
+                     TCG_REG_X3, addr_reg, (1 << (opc & MO_SIZE)) - 1);
         tlb_mask = TARGET_PAGE_MASK;
         x3 = TCG_REG_X3;
     }
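
On the fast path the backends fold the alignment requirement into the TLB
comparison mask instead of emitting a separate check. A worked example for a
4-byte load, assuming 4 KiB pages (tlb_compare_mask() is illustrative, not a
function from the patch):

    /*   MO_TEUL | MO_ALIGN     a_bits = 2    tlb_mask = ~0xfff | 0x3
     *   MO_TEUL | MO_ALIGN_16  a_bits = 4    tlb_mask = ~0xfff | 0xf
     *   MO_TEUL | MO_UNALN     a_bits = -2   tlb_mask = ~0xfff, and the
     *                          compare uses addr + 3 (the last byte) instead
     *
     * An address that violates the alignment keeps low bits set, fails the
     * masked TLB compare and is routed to the slow-path helper, which then
     * raises the alignment fault.
     */
    static uint64_t tlb_compare_mask(int a_bits)
    {
        return a_bits >= 0 ? TARGET_PAGE_MASK | ((1ULL << a_bits) - 1)
                           : (uint64_t)TARGET_PAGE_MASK;
    }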

View file

@@ -1304,8 +1304,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     TCGType ttype = TCG_TYPE_I32;
     TCGType htype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0;
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
-    bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
+    int a_bits = get_alignment_bits(opc);
+    target_ulong tlb_mask;
 
     if (TCG_TARGET_REG_BITS == 64) {
         if (TARGET_LONG_BITS == 64) {
@@ -1319,19 +1319,22 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     }
 
     tcg_out_mov(s, htype, r0, addrlo);
-    if (aligned) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         tcg_out_mov(s, ttype, r1, addrlo);
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
         /* For unaligned access check that we don't cross pages using
            the page address of the last byte. */
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo,
+                             (1 << (opc & MO_SIZE)) - 1);
+        tlb_mask = TARGET_PAGE_MASK;
     }
 
     tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-    tgen_arithi(s, ARITH_AND + trexw, r1,
-                TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
+    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
     tgen_arithi(s, ARITH_AND + hrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

View file

@@ -1392,7 +1392,7 @@ static void * const qemu_st_helpers[16] = {
    in CR7, loads the addend of the TLB into R3, and returns the register
    containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
 
-static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
+static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, bool is_read)
 {
@@ -1402,6 +1402,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
        : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
     int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
     TCGReg base = TCG_AREG0;
+    TCGMemOp s_bits = opc & MO_SIZE;
+    int a_bits = get_alignment_bits(opc);
 
     /* Extract the page index, shifted into place for tlb index. */
     if (TCG_TARGET_REG_BITS == 64) {
@@ -1453,17 +1455,40 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
        to minimize any load use delay. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);
 
-    /* Clear the non-page, non-alignment bits from the address. */
+    /* Clear the non-page, non-alignment bits from the address */
     if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
+        /* We don't support unaligned accesses on 32-bits, preserve
+         * the bottom bits and thus trigger a comparison failure on
+         * unaligned accesses
+         */
+        if (a_bits < 0) {
+            a_bits = s_bits;
+        }
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
-                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
-    } else if (!s_bits) {
-        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo,
-                    0, 63 - TARGET_PAGE_BITS);
+                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+    } else if (a_bits) {
+        /* More than byte access, we need to handle alignment */
+        if (a_bits > 0) {
+            /* Alignment required by the front-end, same as 32-bits */
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
+                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+        } else {
+            /* We support unaligned accesses, we need to make sure we fail
+             * if we cross a page boundary. The trick is to add the
+             * access_size-1 to the address before masking the low bits.
+             * That will make the address overflow to the next page if we
+             * cross a page boundary which will then force a mismatch of
+             * the TLB compare since the next page cannot possibly be in
+             * the same TLB index.
+             */
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, (1 << s_bits) - 1));
+            tcg_out_rld(s, RLDICR, TCG_REG_R0, TCG_REG_R0,
+                        0, 63 - TARGET_PAGE_BITS);
+        }
     } else {
-        tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
-                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
-        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+        /* Byte access, just chop off the bits below the page index */
+        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, 0, 63 - TARGET_PAGE_BITS);
     }
 
     if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
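
A numeric illustration of the overflow trick described in the comment above,
assuming 4 KiB pages and an 8-byte access (s_bits == 3); page_of_last_byte()
is only a sketch of the computation the emitted ADDI/RLDICR pair performs:

    static target_ulong page_of_last_byte(target_ulong addr, int s_bits)
    {
        /* Page base of the access's last byte. */
        return (addr + (1 << s_bits) - 1) & TARGET_PAGE_MASK;
    }

    /* page_of_last_byte(0x1ffa, 3) == 0x2000: the access spills into the next
     * page, the TLB compare against the entry for page 0x1000 fails, and the
     * slow path handles the split.
     * page_of_last_byte(0x1ff0, 3) == 0x1000: still within the page, the fast
     * path proceeds as before.
     */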

View file

@@ -1511,17 +1511,19 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
     int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int a_bits = get_alignment_bits(opc);
     int ofs, a_off;
     uint64_t tlb_mask;
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address. For unaligned access, we check that we don't
        cross pages using the address of the last byte of the access. */
-    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         a_off = 0;
-        tlb_mask = TARGET_PAGE_MASK | s_mask;
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
-        a_off = s_mask;
+        a_off = (1 << (opc & MO_SIZE)) - 1;
         tlb_mask = TARGET_PAGE_MASK;
     }

View file

@@ -1851,6 +1851,9 @@ void tcg_gen_goto_tb(TCGContext *s, unsigned idx)
 
 static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
 {
+    /* Trigger the asserts within as early as possible. */
+    (void)get_alignment_bits(op);
+
     switch (op & MO_SIZE) {
     case MO_8:
         op &= ~MO_BSWAP;
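
The call is made purely for its side effect: a malformed memop now trips the
r >= s assertion while the front end is still building the op, rather than
surfacing as odd backend behaviour later. An illustrative sketch (not from
the patch):

    static void sketch_bad_memop(void)
    {
        /* An 8-byte access that requests only 2-byte alignment is invalid;
         * under CONFIG_DEBUG_TCG this asserts (r == 1 < s == 3) as soon as
         * tcg_canonicalize_memop() runs. */
        (void)get_alignment_bits(MO_TEQ | MO_ALIGN_2);
    }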

View file

@@ -1045,6 +1045,21 @@ static const char * const ldst_name[] =
 #endif // _MSC_VER
 };
 
+static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
+    "",
+    "al2+",
+    "al4+",
+    "al8+",
+    "al16+",
+    "al32+",
+    "al64+",
+#ifdef ALIGNED_ONLY
+    "un+"
+#else
+    "al+",
+#endif
+};
+
 void tcg_dump_ops(TCGContext *s)
 {
     char buf[128];
@@ -1147,14 +1162,8 @@ void tcg_dump_ops(TCGContext *s)
             if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                 printf(",%s,%u", ldst_name[op], ix);
             } else {
-                const char *s_al = "", *s_op;
-                if (op & MO_AMASK) {
-                    if ((op & MO_AMASK) == MO_ALIGN) {
-                        s_al = "al+";
-                    } else {
-                        s_al = "un+";
-                    }
-                }
+                const char *s_al, *s_op;
+                s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                 printf(",%s%s,%u", s_al, s_op, ix);
             }
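
For reference, the kind of memop annotation tcg_dump_ops() now prints
(illustrative; the mmu index 1 and the surrounding op formatting are just for
the example):

    /*   MO_TEUL | MO_UNALN     ->  ",leul,1"
     *   MO_TEUL | MO_ALIGN     ->  ",al+leul,1"
     *   MO_TEUL | MO_ALIGN_16  ->  ",al16+leul,1"
     */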

View file

@@ -193,6 +193,14 @@ typedef uint64_t tcg_insn_unit;
 /* The port better have done this. */
 #endif
 
+#ifdef CONFIG_DEBUG_TCG
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#elif QEMU_GNUC_PREREQ(4, 5)
+# define tcg_debug_assert(X) \
+    do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#else
+# define tcg_debug_assert(X) do { (void)(X); } while (0)
+#endif
+
 typedef struct TCGRelocation {
     struct TCGRelocation *next;
@@ -278,10 +286,26 @@ typedef enum TCGMemOp {
 #endif
 
     /* MO_UNALN accesses are never checked for alignment.
-       MO_ALIGN accesses will result in a call to the CPU's
-       do_unaligned_access hook if the guest address is not aligned.
-       The default depends on whether the target CPU defines ALIGNED_ONLY. */
-    MO_AMASK = 16,
+     * MO_ALIGN accesses will result in a call to the CPU's
+     * do_unaligned_access hook if the guest address is not aligned.
+     * The default depends on whether the target CPU defines ALIGNED_ONLY.
+     * Some architectures (e.g. ARMv8) need the address which is aligned
+     * to a size more than the size of the memory access.
+     * To support such check it's enough the current costless alignment
+     * check implementation in QEMU, but we need to support
+     * an alignment size specifying.
+     * MO_ALIGN supposes a natural alignment
+     * (i.e. the alignment size is the size of a memory access).
+     * Note that an alignment size must be equal or greater
+     * than an access size.
+     * There are three options:
+     * - an alignment to the size of an access (MO_ALIGN);
+     * - an alignment to the specified size that is equal or greater than
+     *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
+     * - unaligned access permitted (MO_UNALN).
+     */
+    MO_ASHIFT = 4,
+    MO_AMASK = 7 << MO_ASHIFT,
 #ifdef ALIGNED_ONLY
     MO_ALIGN = 0,
     MO_UNALN = MO_AMASK,
@@ -289,6 +313,12 @@ typedef enum TCGMemOp {
     MO_ALIGN = MO_AMASK,
     MO_UNALN = 0,
 #endif
+    MO_ALIGN_2  = 1 << MO_ASHIFT,
+    MO_ALIGN_4  = 2 << MO_ASHIFT,
+    MO_ALIGN_8  = 3 << MO_ASHIFT,
+    MO_ALIGN_16 = 4 << MO_ASHIFT,
+    MO_ALIGN_32 = 5 << MO_ASHIFT,
+    MO_ALIGN_64 = 6 << MO_ASHIFT,
 
     /* Combinations of the above, for ease of use. */
     MO_UB = MO_8,
@@ -320,6 +350,45 @@ typedef enum TCGMemOp {
     MO_SSIZE = MO_SIZE | MO_SIGN,
 } TCGMemOp;
 
+/**
+ * get_alignment_bits
+ * @memop: TCGMemOp value
+ *
+ * Extract the alignment size from the memop.
+ *
+ * Returns: 0 in case of byte access (which is always aligned);
+ *          positive value - number of alignment bits;
+ *          negative value if unaligned access enabled
+ *          and this is not a byte access.
+ */
+static inline int get_alignment_bits(TCGMemOp memop)
+{
+    int a = memop & MO_AMASK;
+    int s = memop & MO_SIZE;
+    int r;
+
+    if (a == MO_UNALN) {
+        /* Negative value if unaligned access enabled,
+         * or zero value in case of byte access.
+         */
+        return -s;
+    } else if (a == MO_ALIGN) {
+        /* A natural alignment: return a number of access size bits */
+        r = s;
+    } else {
+        /* Specific alignment size. It must be equal or greater
+         * than the access size.
+         */
+        r = a >> MO_ASHIFT;
+        tcg_debug_assert(r >= s);
+    }
+#if defined(CONFIG_SOFTMMU)
+    /* The requested alignment cannot overlap the TLB flags. */
+    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+#endif
+    return r;
+}
+
 typedef tcg_target_ulong TCGArg;
 
 /* Define a type and accessor macros for variables. Using pointer types
@@ -913,15 +982,6 @@ do {\
     abort();\
 } while (0)
 
-#ifdef CONFIG_DEBUG_TCG
-# define tcg_debug_assert(X) do { assert(X); } while (0)
-#elif QEMU_GNUC_PREREQ(4, 5)
-# define tcg_debug_assert(X) \
-    do { if (!(X)) { __builtin_unreachable(); } } while (0)
-#else
-# define tcg_debug_assert(X) do { (void)(X); } while (0)
-#endif
-
 void tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs);
 
 #if UINTPTR_MAX == UINT32_MAX