diff --git a/qemu/tcg/README b/qemu/tcg/README
index 27d07d9f..87f055df 100644
--- a/qemu/tcg/README
+++ b/qemu/tcg/README
@@ -497,6 +497,7 @@ goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
 
 * qemu_ld_i32/i64 t0, t1, flags, memidx
 * qemu_st_i32/i64 t0, t1, flags, memidx
+* qemu_st8_i32 t0, t1, flags, memidx
 
 Load data at the guest address t1 into t0, or store data in t0 at guest
 address t1.  The _i32/_i64 size applies to the size of the input/output
@@ -513,6 +514,10 @@ of the memory access.
 For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
 64-bit memory access specified in flags.
 
+For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
+the memory operation is known to be 8-bit.  This allows the backend to
+provide a different set of register constraints.
+
 ********* Host vector operations
 
 All of the vector ops have two parameters, TCGOP_VECL & TCGOP_VECE.
diff --git a/qemu/tcg/aarch64/tcg-target.h b/qemu/tcg/aarch64/tcg-target.h
index 663dd0b9..d1159d80 100644
--- a/qemu/tcg/aarch64/tcg-target.h
+++ b/qemu/tcg/aarch64/tcg-target.h
@@ -88,6 +88,7 @@ typedef enum {
 #define TCG_TARGET_HAS_extrl_i64_i32 0
 #define TCG_TARGET_HAS_extrh_i64_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_qemu_st8_i32 0
 
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
diff --git a/qemu/tcg/arm/tcg-target.h b/qemu/tcg/arm/tcg-target.h
index ad5ae5ba..f1a890d2 100644
--- a/qemu/tcg/arm/tcg-target.h
+++ b/qemu/tcg/arm/tcg-target.h
@@ -127,6 +127,7 @@ extern bool use_idiv_instructions_rt;
 #define TCG_TARGET_HAS_rem_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
 #define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
 
 enum {
     TCG_AREG0 = TCG_REG_R6,
diff --git a/qemu/tcg/i386/tcg-target.h b/qemu/tcg/i386/tcg-target.h
index 2fac3a91..90cbd54c 100644
--- a/qemu/tcg/i386/tcg-target.h
+++ b/qemu/tcg/i386/tcg-target.h
@@ -205,6 +205,9 @@ extern bool have_movbe;
 #define TCG_TARGET_HAS_muls2_i64 1
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+#else
+#define TCG_TARGET_HAS_qemu_st8_i32 1
 #endif
 
 /* We do not support older SSE systems, only beginning with AVX1.  */
diff --git a/qemu/tcg/i386/tcg-target.inc.c b/qemu/tcg/i386/tcg-target.inc.c
index 1b7ddcba..71d82c2f 100644
--- a/qemu/tcg/i386/tcg-target.inc.c
+++ b/qemu/tcg/i386/tcg-target.inc.c
@@ -252,11 +252,21 @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
         ct->regs |= ALL_VECTOR_REGS;
         break;
 
-    /* qemu_ld/st address constraint */
     case 'L':
+        /* qemu_ld/st data+address constraint */
         ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
+#ifdef CONFIG_SOFTMMU
         tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
         tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
+#endif
+        break;
+    case 's':
+        /* qemu_st8_i32 data constraint */
+        ct->regs = 0xf;
+#ifdef CONFIG_SOFTMMU
+        tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
+        tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
+#endif
         break;
 
     case 'e':
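Aside on the new 's' constraint: in 32-bit mode only %eax, %ecx, %edx and %ebx have byte-sized register forms (%al, %cl, %dl, %bl), and in the i386 backend's register numbering those are indices 0-3, so the mask 0xf limits the store data operand to exactly those four registers. The standalone sketch below shows the register set that results under softmmu; the REG_* names and the TCG_REG_L0/L1 = %eax/%edx mapping are assumptions mirroring the 32-bit branch of tcg/i386/tcg-target.h, not code from this patch.

    /* Standalone sketch (not QEMU source) of how the 's' constraint
     * mask is derived on a 32-bit softmmu host. */
    #include <stdio.h>
    #include <stdint.h>

    enum { REG_EAX, REG_ECX, REG_EDX, REG_EBX };   /* indices 0..3 */

    static void reset_reg(uint32_t *set, int reg)
    {
        *set &= ~(1u << reg);       /* mirrors tcg_regset_reset_reg() */
    }

    int main(void)
    {
        uint32_t regs = 0xf;   /* byte-addressable regs: eax..ebx */

        /* Under CONFIG_SOFTMMU the TLB-lookup scratch registers are
         * excluded; on a 32-bit host these are assumed to be L0 = %eax
         * and L1 = %edx. */
        reset_reg(&regs, REG_EAX);
        reset_reg(&regs, REG_EDX);

        /* Prints 0xa, i.e. only %ecx and %ebx remain available. */
        printf("softmmu 's' set: 0x%x\n", (unsigned)regs);
        return 0;
    }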
@@ -2106,7 +2116,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                    TCGReg base, int index, intptr_t ofs,
                                    int seg, MemOp memop)
 {
-    const TCGReg scratch = TCG_REG_L0;
     bool use_movbe = false;
     int movop = OPC_MOVL_EvGv;
 
@@ -2122,15 +2131,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
 
     switch (memop & MO_SIZE) {
     case MO_8:
-        /*
-         * In 32-bit mode, 8-bit stores can only happen from [abcd]x.
-         * TODO: Adjust constraints such that this is is forced,
-         * then we won't need a scratch at all for user-only.
-         */
-        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
-            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
-            datalo = scratch;
-        }
+        /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
+        tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
         tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                                  datalo, base, index, 0, ofs);
         break;
@@ -2497,6 +2499,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_ld(s, args, 1);
         break;
     case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st8_i32:
         tcg_out_qemu_st(s, args, 0);
         break;
     case INDEX_op_qemu_st_i64:
@@ -2955,9 +2958,11 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
     static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
     static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
+    static const TCGTargetOpDef s_L = { .args_ct_str = { "s", "L" } };
     static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
     static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
     static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
+    static const TCGTargetOpDef s_L_L = { .args_ct_str = { "s", "L", "L" } };
     static const TCGTargetOpDef r_r_L_L
         = { .args_ct_str = { "r", "r", "L", "L" } };
     static const TCGTargetOpDef L_L_L_L
@@ -3151,6 +3156,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
     case INDEX_op_qemu_st_i32:
         return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
+    case INDEX_op_qemu_st8_i32:
+        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &s_L : &s_L_L;
     case INDEX_op_qemu_ld_i64:
         return (TCG_TARGET_REG_BITS == 64 ? &r_L
                 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
diff --git a/qemu/tcg/mips/tcg-target.h b/qemu/tcg/mips/tcg-target.h
index c6b091d8..b04c12d3 100644
--- a/qemu/tcg/mips/tcg-target.h
+++ b/qemu/tcg/mips/tcg-target.h
@@ -169,6 +169,7 @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_ctz_i32 0
 #define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
 
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions
diff --git a/qemu/tcg/optimize.c b/qemu/tcg/optimize.c
index 6f2a24fb..397f41a6 100644
--- a/qemu/tcg/optimize.c
+++ b/qemu/tcg/optimize.c
@@ -1542,6 +1542,7 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_ld_i32:
         case INDEX_op_qemu_ld_i64:
         case INDEX_op_qemu_st_i32:
+        case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_st_i64:
         case INDEX_op_call:
             /* Opcodes that touch guest memory stop the optimization.  */
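On operand counts in the s_L/s_L_L entries above: a byte store carries one data operand plus one or two address operands. When the guest address fits in one host register the op takes "s", "L"; a 64-bit guest address on a 32-bit host is split across two registers, giving "s", "L", "L". A minimal sketch of that selection, mirroring the tcg_target_op_def() change above (the function and names here are stand-ins for illustration, not QEMU code):

    /* Standalone sketch of the constraint-set selection for
     * INDEX_op_qemu_st8_i32. */
    #include <stdio.h>

    static const char *st8_constraints(int target_long_bits,
                                       int host_reg_bits)
    {
        /* Mirrors:
         * TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &s_L : &s_L_L */
        return target_long_bits <= host_reg_bits ? "s,L" : "s,L,L";
    }

    int main(void)
    {
        /* 32-bit guest, 32-bit host: one address register. */
        printf("%s\n", st8_constraints(32, 32));   /* s,L */
        /* 64-bit guest, 32-bit host: address split in two. */
        printf("%s\n", st8_constraints(64, 32));   /* s,L,L */
        return 0;
    }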
diff --git a/qemu/tcg/s390/tcg-target.h b/qemu/tcg/s390/tcg-target.h
index 07accabb..c6c15bd1 100644
--- a/qemu/tcg/s390/tcg-target.h
+++ b/qemu/tcg/s390/tcg-target.h
@@ -97,6 +97,7 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_extrh_i64_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
 #define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_qemu_st8_i32 0
 
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
diff --git a/qemu/tcg/sparc/tcg-target.h b/qemu/tcg/sparc/tcg-target.h
index 9b678117..6a3ab700 100644
--- a/qemu/tcg/sparc/tcg-target.h
+++ b/qemu/tcg/sparc/tcg-target.h
@@ -126,6 +126,7 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_goto_ptr 1
 #define TCG_TARGET_HAS_direct_jump 1
+#define TCG_TARGET_HAS_qemu_st8_i32 0
 
 #define TCG_TARGET_HAS_extrl_i64_i32 1
 #define TCG_TARGET_HAS_extrh_i64_i32 1
diff --git a/qemu/tcg/tcg-op.c b/qemu/tcg/tcg-op.c
index d530738d..b5b3f626 100644
--- a/qemu/tcg/tcg-op.c
+++ b/qemu/tcg/tcg-op.c
@@ -2888,7 +2888,11 @@ void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg i
     }
 
-    gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st_i32, val, addr, memop, idx);
+    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
+        gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st8_i32, val, addr, memop, idx);
+    } else {
+        gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st_i32, val, addr, memop, idx);
+    }
 
     if (swap) {
         tcg_temp_free_i32(tcg_ctx, swap);
diff --git a/qemu/tcg/tcg-opc.h b/qemu/tcg/tcg-opc.h
index 16b20812..496f0ad9 100644
--- a/qemu/tcg/tcg-opc.h
+++ b/qemu/tcg/tcg-opc.h
@@ -207,6 +207,11 @@ DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
 DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
 
+/* Only used by i386 to cope with stupid register constraints. */
+DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
+    IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+
 /* Host vector support.  */
 
 #define IMPLVEC  TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
diff --git a/qemu/tcg/tcg.c b/qemu/tcg/tcg.c
index cb4280d6..2df17e38 100644
--- a/qemu/tcg/tcg.c
+++ b/qemu/tcg/tcg.c
@@ -833,6 +833,9 @@ bool tcg_op_supported(TCGOpcode op)
     case INDEX_op_qemu_st_i64:
         return true;
 
+    case INDEX_op_qemu_st8_i32:
+        return TCG_TARGET_HAS_qemu_st8_i32;
+
     case INDEX_op_goto_ptr:
         return TCG_TARGET_HAS_goto_ptr;
 
@@ -1486,6 +1489,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
             break;
         case INDEX_op_qemu_ld_i32:
         case INDEX_op_qemu_st_i32:
+        case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_ld_i64:
         case INDEX_op_qemu_st_i64:
             {
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index b2b21e98..c4252794 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -1088,7 +1088,7 @@ static inline TCGv_ptr tcg_temp_local_new_ptr(TCGContext *s)
 }
 
 // UNICORN: Added
-#define TCG_OP_DEFS_TABLE_SIZE 189
+#define TCG_OP_DEFS_TABLE_SIZE 190
 extern const TCGOpDef tcg_op_defs_org[TCG_OP_DEFS_TABLE_SIZE];
 
 typedef struct TCGTargetOpDef {
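On the frontend side, the tcg-op.c hunk above reduces to a single opcode pick at translation time. A minimal re-creation of that decision follows; the MO_* values copy TCG's MemOp encoding, while the opcode enum and pick_store_op() are stand-ins for illustration, not QEMU code:

    /* Standalone sketch of the opcode selection added to
     * tcg_gen_qemu_st_i32(). */
    #include <stdio.h>

    #define MO_8     0
    #define MO_16    1
    #define MO_32    2
    #define MO_SIZE  3      /* mask for the size bits, as in tcg headers */

    #define TCG_TARGET_HAS_qemu_st8_i32 1   /* e.g. a 32-bit x86 host */

    enum { op_qemu_st_i32, op_qemu_st8_i32 };

    static int pick_store_op(unsigned memop)
    {
        /* Byte stores use the constrained opcode when the backend
         * provides it; everything else keeps the generic store. */
        if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
            return op_qemu_st8_i32;
        }
        return op_qemu_st_i32;
    }

    int main(void)
    {
        printf("MO_8  -> %s\n", pick_store_op(MO_8) == op_qemu_st8_i32
                                ? "qemu_st8_i32" : "qemu_st_i32");
        printf("MO_32 -> %s\n", pick_store_op(MO_32) == op_qemu_st8_i32
                                ? "qemu_st8_i32" : "qemu_st_i32");
        return 0;
    }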