tcg/s390: Use softmmu fast path for unaligned accesses

Backports commit a5e39810b9088b5d20fac8e0293f281e1c8b608f from qemu
Author:    Richard Henderson, 2018-02-10 23:51 -05:00
Committer: Lioncash
Parent:    a3aaf5a864
Commit:    aaf89ed84d

@@ -1504,20 +1504,36 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    TCGMemOp s_bits = opc & MO_SIZE;
-    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
-    int ofs;
+    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int ofs, a_off;
+    uint64_t tlb_mask;
+
+    /* For aligned accesses, we check the first byte and include the alignment
+       bits within the address. For unaligned access, we check that we don't
+       cross pages using the address of the last byte of the access. */
+    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+        a_off = 0;
+        tlb_mask = TARGET_PAGE_MASK | s_mask;
+    } else {
+        a_off = s_mask;
+        tlb_mask = TARGET_PAGE_MASK;
+    }
 
     if (facilities & FACILITY_GEN_INST_EXT) {
         tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                       64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                       63 - CPU_TLB_ENTRY_BITS,
                       64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
-        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        if (a_off) {
+            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
+            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
+        } else {
+            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        }
     } else {
         tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                      TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
+        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
         tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
         tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
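
The comment in the new code summarizes the trick: an aligned access is compared against its first byte with the alignment bits folded into the TLB mask, while an unaligned access is compared against its last byte (addr + s_mask) so that only accesses which actually cross a page boundary fall back to the slow path. The following standalone C sketch, not taken from QEMU, models that comparison; TARGET_PAGE_BITS, the MO_* values, and the tlb_hit helper are illustrative assumptions chosen to mirror the fields used in the diff.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

#define MO_SIZE  0x03   /* log2 of access size: 0 = 1 byte .. 3 = 8 bytes */
#define MO_AMASK 0x10   /* hypothetical stand-in for the alignment field */
#define MO_ALIGN 0x10

/* Returns nonzero if the access may take the TLB fast path, i.e. the
   masked (and possibly offset) address matches the page in the TLB entry. */
static int tlb_hit(uint64_t addr, int opc, uint64_t tlb_entry_addr)
{
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    uint64_t a_off, tlb_mask;

    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
        /* Aligned (or 1-byte) access: fold the alignment bits into the
           compare, so any misaligned address misses into the slow path. */
        a_off = 0;
        tlb_mask = TARGET_PAGE_MASK | s_mask;
    } else {
        /* Unaligned access: compare using the last byte of the access, so
           only a page-crossing access misses into the slow path. */
        a_off = s_mask;
        tlb_mask = TARGET_PAGE_MASK;
    }
    return ((addr + a_off) & tlb_mask) == tlb_entry_addr;
}

int main(void)
{
    uint64_t page = 0x1000;   /* TLB entry covers the page at 0x1000 */

    /* Unaligned 4-byte load at 0x1ffe: last byte is at 0x2001, on the
       next page, so the fast path is refused. */
    printf("%d\n", tlb_hit(0x1ffe, 2, page));             /* 0: crosses page */

    /* Unaligned 4-byte load at 0x1ff9: last byte 0x1ffc stays on the page,
       so the fast path is taken - this is what the patch newly allows. */
    printf("%d\n", tlb_hit(0x1ff9, 2, page));             /* 1: fast path */

    /* Aligned-only 4-byte load at 0x1002: the alignment bits are in the
       mask, so a misaligned address always goes to the slow path. */
    printf("%d\n", tlb_hit(0x1002, 2 | MO_ALIGN, page));  /* 0: misaligned */
    return 0;
}

In the emitted s390 code the "+ a_off" is a single LA (load address) instruction; on the non-FACILITY_GEN_INST_EXT path it simply replaces the register move that was there before, so the unaligned check adds no instructions there, and on the RISBG path it costs one LA plus an AND only when a_off is nonzero.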