From b704b6c205849f312314e8217e93a7eb3c18a3e3 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Thu, 4 Oct 2018 03:35:07 -0400 Subject: [PATCH] target/i386: move cpu_T1 to DisasContext Backports commit b48597b0eda32d4c7ade2ba3f98f06f62289e3e2 from qemu --- qemu/target/i386/translate.c | 362 ++++++++++++++++------------------- qemu/tcg/tcg.h | 3 - 2 files changed, 170 insertions(+), 195 deletions(-) diff --git a/qemu/target/i386/translate.c b/qemu/target/i386/translate.c index d469a2c4..c963f79d 100644 --- a/qemu/target/i386/translate.c +++ b/qemu/target/i386/translate.c @@ -118,6 +118,7 @@ typedef struct DisasContext { TCGv cc_srcT; TCGv A0; TCGv T0; + TCGv T1; sigjmp_buf jmpbuf; struct uc_struct *uc; @@ -777,9 +778,8 @@ static void gen_op_update2_cc(DisasContext *s) TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; - TCGv cpu_T1 = tcg_ctx->cpu_T1; - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_T1); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, s->T1); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); } @@ -789,10 +789,9 @@ static void gen_op_update3_cc(DisasContext *s, TCGv reg) TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2; - TCGv cpu_T1 = tcg_ctx->cpu_T1; tcg_gen_mov_tl(tcg_ctx, cpu_cc_src2, reg); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_T1); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, s->T1); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); } @@ -800,9 +799,8 @@ static inline void gen_op_testl_T0_T1_cc(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; - TCGv cpu_T1 = tcg_ctx->cpu_T1; - tcg_gen_and_tl(tcg_ctx, cpu_cc_dst, s->T0, cpu_T1); + tcg_gen_and_tl(tcg_ctx, cpu_cc_dst, s->T0, s->T1); } static void gen_op_update_neg_cc(DisasContext *s) @@ -1252,11 +1250,8 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) static inline void gen_scas(DisasContext *s, TCGMemOp ot) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; - gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_op(s, OP_CMPL, ot, R_EAX); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_EDI); @@ -1264,11 +1259,8 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) static inline void gen_cmps(DisasContext *s, TCGMemOp ot) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; - gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); gen_op_movl_T0_Dshift(s, ot); @@ -1441,7 +1433,6 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4; - TCGv cpu_T1 = tcg_ctx->cpu_T1; if (d != OR_TMP0) { gen_op_mov_v_reg(s, ot, s->T0, d); @@ -1451,11 +1442,11 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) switch(op) { case OP_ADCL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(tcg_ctx, s->T0, cpu_tmp4, cpu_T1); + tcg_gen_add_tl(tcg_ctx, s->T0, cpu_tmp4, s->T1); tcg_gen_atomic_add_fetch_tl(tcg_ctx, s->T0, s->A0, s->T0, s->mem_index, ot | MO_LE); } else { - tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_tmp4); gen_op_st_rm_T0_A0(s, ot, d); } @@ -1464,12 +1455,12 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) 
break; case OP_SBBL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(tcg_ctx, s->T0, cpu_T1, cpu_tmp4); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T1, cpu_tmp4); tcg_gen_neg_tl(tcg_ctx, s->T0, s->T0); tcg_gen_atomic_add_fetch_tl(tcg_ctx, s->T0, s->A0, s->T0, s->mem_index, ot | MO_LE); } else { - tcg_gen_sub_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_sub_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_sub_tl(tcg_ctx, s->T0, s->T0, cpu_tmp4); gen_op_st_rm_T0_A0(s, ot, d); } @@ -1478,10 +1469,10 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) break; case OP_ADDL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_add_fetch_tl(tcg_ctx, s->T0, s->A0, cpu_T1, + tcg_gen_atomic_add_fetch_tl(tcg_ctx, s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE); } else { - tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_rm_T0_A0(s, ot, d); } gen_op_update2_cc(s); @@ -1489,13 +1480,13 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) break; case OP_SUBL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_neg_tl(tcg_ctx, s->T0, cpu_T1); + tcg_gen_neg_tl(tcg_ctx, s->T0, s->T1); tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->cc_srcT, s->A0, s->T0, s->mem_index, ot | MO_LE); - tcg_gen_sub_tl(tcg_ctx, s->T0, s->cc_srcT, cpu_T1); + tcg_gen_sub_tl(tcg_ctx, s->T0, s->cc_srcT, s->T1); } else { tcg_gen_mov_tl(tcg_ctx, s->cc_srcT, s->T0); - tcg_gen_sub_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_sub_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_rm_T0_A0(s, ot, d); } gen_op_update2_cc(s); @@ -1504,10 +1495,10 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) default: case OP_ANDL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_and_fetch_tl(tcg_ctx, s->T0, s->A0, cpu_T1, + tcg_gen_atomic_and_fetch_tl(tcg_ctx, s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE); } else { - tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_rm_T0_A0(s, ot, d); } gen_op_update1_cc(s); @@ -1515,10 +1506,10 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) break; case OP_ORL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_or_fetch_tl(tcg_ctx, s->T0, s->A0, cpu_T1, + tcg_gen_atomic_or_fetch_tl(tcg_ctx, s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE); } else { - tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_rm_T0_A0(s, ot, d); } gen_op_update1_cc(s); @@ -1526,19 +1517,19 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) break; case OP_XORL: if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s->T0, s->A0, cpu_T1, + tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE); } else { - tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_rm_T0_A0(s, ot, d); } gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case OP_CMPL: - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_T1); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, s->T1); tcg_gen_mov_tl(tcg_ctx, s->cc_srcT, s->T0); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, s->T0, cpu_T1); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, s->T0, s->T1); set_cc_op(s, CC_OP_SUBB + ot); break; } @@ -1627,7 +1618,6 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, target_ulong mask = (ot == MO_64 ? 
0x3f : 0x1f); TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0; - TCGv cpu_T1 = tcg_ctx->cpu_T1; /* load */ if (op1 == OR_TMP0) { @@ -1636,28 +1626,28 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, gen_op_mov_v_reg(s, ot, s->T0, op1); } - tcg_gen_andi_tl(tcg_ctx, cpu_T1, cpu_T1, mask); - tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, cpu_T1, 1); + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); + tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, s->T1, 1); if (is_right) { if (is_arith) { gen_exts(s, ot, s->T0); tcg_gen_sar_tl(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); - tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { gen_extu(s, ot, s->T0); tcg_gen_shr_tl(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); - tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); } } else { tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); - tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); - gen_shift_flags(s, ot, s->T0, cpu_tmp0, cpu_T1, is_right); + gen_shift_flags(s, ot, s->T0, cpu_tmp0, s->T1, is_right); } static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, @@ -1714,7 +1704,6 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2; - TCGv cpu_T1 = tcg_ctx->cpu_T1; /* load */ if (op1 == OR_TMP0) { @@ -1723,7 +1712,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) gen_op_mov_v_reg(s, ot, s->T0, op1); } - tcg_gen_andi_tl(tcg_ctx, cpu_T1, cpu_T1, mask); + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); switch (ot) { case MO_8: @@ -1739,7 +1728,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) #ifdef TARGET_X86_64 case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, s->T1); if (is_right) { tcg_gen_rotr_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); } else { @@ -1750,9 +1739,9 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) #endif default: if (is_right) { - tcg_gen_rotr_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_rotr_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { - tcg_gen_rotl_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_rotl_tl(tcg_ctx, s->T0, s->T0, s->T1); } break; } @@ -1784,7 +1773,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) exactly as we computed above. 
*/ t0 = tcg_const_i32(tcg_ctx, 0); t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_T1); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, s->T1); tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, CC_OP_ADCOX); tcg_gen_movi_i32(tcg_ctx, cpu_tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, cpu_cc_op, t1, t0, @@ -1886,7 +1875,6 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; gen_compute_eflags(s); assert(s->cc_op == CC_OP_EFLAGS); @@ -1900,17 +1888,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (is_right) { switch (ot) { case MO_8: - gen_helper_rcrb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rcrb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_16: - gen_helper_rcrw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rcrw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_32: - gen_helper_rcrl(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rcrl(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #ifdef TARGET_X86_64 case MO_64: - gen_helper_rcrq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rcrq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #endif default: @@ -1919,17 +1907,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } else { switch (ot) { case MO_8: - gen_helper_rclb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rclb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_16: - gen_helper_rclw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rclw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; case MO_32: - gen_helper_rcll(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rcll(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #ifdef TARGET_X86_64 case MO_64: - gen_helper_rclq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, cpu_T1); + gen_helper_rclq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); break; #endif default: @@ -1949,7 +1937,6 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0; TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4; - TCGv cpu_T1 = tcg_ctx->cpu_T1; /* load */ if (op1 == OR_TMP0) { @@ -1967,11 +1954,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A portion by constructing it as a 32-bit value. */ if (is_right) { - tcg_gen_deposit_tl(tcg_ctx, cpu_tmp0, s->T0, cpu_T1, 16, 16); - tcg_gen_mov_tl(tcg_ctx, cpu_T1, s->T0); + tcg_gen_deposit_tl(tcg_ctx, cpu_tmp0, s->T0, s->T1, 16, 16); + tcg_gen_mov_tl(tcg_ctx, s->T1, s->T0); tcg_gen_mov_tl(tcg_ctx, s->T0, cpu_tmp0); } else { - tcg_gen_deposit_tl(tcg_ctx, cpu_T1, s->T0, cpu_T1, 16, 16); + tcg_gen_deposit_tl(tcg_ctx, s->T1, s->T0, s->T1, 16, 16); } /* FALLTHRU */ #ifdef TARGET_X86_64 @@ -1979,11 +1966,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* Concatenate the two 32-bit values and use a 64-bit shift. 
*/ tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, count, 1); if (is_right) { - tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_shr_i64(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); tcg_gen_shr_i64(tcg_ctx, s->T0, s->T0, count); } else { - tcg_gen_concat_tl_i64(tcg_ctx, s->T0, cpu_T1, s->T0); + tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T1, s->T0); tcg_gen_shl_i64(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); tcg_gen_shl_i64(tcg_ctx, s->T0, s->T0, count); tcg_gen_shri_i64(tcg_ctx, cpu_tmp0, cpu_tmp0, 32); @@ -1998,24 +1985,24 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, count); - tcg_gen_shl_tl(tcg_ctx, cpu_T1, cpu_T1, cpu_tmp4); + tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, cpu_tmp4); } else { tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, s->T0, cpu_tmp0); if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. */ tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, 33, count); - tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, cpu_T1, cpu_tmp4); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, s->T1, cpu_tmp4); tcg_gen_or_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, cpu_tmp4); } tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, count); - tcg_gen_shr_tl(tcg_ctx, cpu_T1, cpu_T1, cpu_tmp4); + tcg_gen_shr_tl(tcg_ctx, s->T1, s->T1, cpu_tmp4); } tcg_gen_movi_tl(tcg_ctx, cpu_tmp4, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, cpu_T1, count, cpu_tmp4, - cpu_tmp4, cpu_T1); - tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s->T1, count, cpu_tmp4, + cpu_tmp4, s->T1); + tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->T1); break; } @@ -2028,11 +2015,8 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) { - TCGContext *tcg_ctx = s1->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; - if (s != OR_TMP1) - gen_op_mov_v_reg(s1, ot, cpu_T1, s); + gen_op_mov_v_reg(s1, ot, s1->T1, s); switch(op) { case OP_ROL: gen_rot_rm_T1(s1, ot, d, 0); @@ -2062,7 +2046,6 @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) static void gen_shifti(DisasContext *s, int op, TCGMemOp ot, int d, int c) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; switch(op) { case OP_ROL: @@ -2083,7 +2066,7 @@ static void gen_shifti(DisasContext *s, int op, TCGMemOp ot, int d, int c) break; default: /* currently not optimized */ - tcg_gen_movi_tl(tcg_ctx, cpu_T1, c); + tcg_gen_movi_tl(tcg_ctx, s->T1, c); gen_shift(s, op, ot, d, OR_TMP1); break; } @@ -2474,12 +2457,11 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, { CCPrepare cc; TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; TCGv *cpu_regs = tcg_ctx->cpu_regs; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - cc = gen_prepare_cc(s, b, cpu_T1); + cc = gen_prepare_cc(s, b, s->T1); if (cc.mask != -1) { TCGv t0 = tcg_temp_new(tcg_ctx); tcg_gen_andi_tl(tcg_ctx, t0, cc.reg, cc.mask); @@ -2675,12 +2657,11 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) int size = 1 << d_ot; TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0; - TCGv cpu_T1 = tcg_ctx->cpu_T1; TCGv *cpu_regs = tcg_ctx->cpu_regs; /* Push BP; compute FrameTemp into T1. 
*/ - tcg_gen_subi_tl(tcg_ctx, cpu_T1, cpu_regs[R_ESP], size); - gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1); + tcg_gen_subi_tl(tcg_ctx, s->T1, cpu_regs[R_ESP], size); + gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1); gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0); level &= 31; @@ -2693,23 +2674,23 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); gen_op_ld_v(s, d_ot, cpu_tmp0, s->A0); - tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_T1, size * i); + tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * i); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); gen_op_st_v(s, d_ot, cpu_tmp0, s->A0); } /* Push the current FrameTemp as the last level. */ - tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_T1, size * level); + tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * level); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_T1, s->A0); + gen_op_st_v(s, d_ot, s->T1, s->A0); } /* Copy the FrameTemp value to EBP. */ - gen_op_mov_reg_v(s, a_ot, R_EBP, cpu_T1); + gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); /* Compute the final value of ESP. */ - tcg_gen_subi_tl(tcg_ctx, cpu_T1, cpu_T1, esp_addend + size * level); - gen_op_mov_reg_v(s, a_ot, R_ESP, cpu_T1); + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, esp_addend + size * level); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } static void gen_leave(DisasContext *s) @@ -2717,16 +2698,15 @@ static void gen_leave(DisasContext *s) TCGMemOp d_ot = mo_pushpop(s, s->dflag); TCGMemOp a_ot = mo_stacksize(s); TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_T1 = tcg_ctx->cpu_T1; TCGv *cpu_regs = tcg_ctx->cpu_regs; gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); - tcg_gen_addi_tl(tcg_ctx, cpu_T1, cpu_regs[R_EBP], 1 << d_ot); + tcg_gen_addi_tl(tcg_ctx, s->T1, cpu_regs[R_EBP], 1 << d_ot); gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); - gen_op_mov_reg_v(s, a_ot, R_ESP, cpu_T1); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) @@ -3538,7 +3518,6 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2; TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0; - TCGv cpu_T1 = tcg_ctx->cpu_T1; TCGv *cpu_regs = tcg_ctx->cpu_regs; b &= 0xff; @@ -4351,10 +4330,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound); tcg_temp_free(tcg_ctx, bound); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, 1); - tcg_gen_shl_tl(tcg_ctx, cpu_T1, cpu_T1, s->A0); - tcg_gen_subi_tl(tcg_ctx, cpu_T1, cpu_T1, 1); - tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_movi_tl(tcg_ctx, s->T1, 1); + tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, s->A0); + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, 1); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); @@ -4370,19 +4349,19 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_ext8u_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext8u_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v]); { TCGv bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); /* Note that since we're using BMILG (in order to get O cleared) we need to store the inverse into C. 
*/ tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, cpu_cc_src, - cpu_T1, bound); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, cpu_T1, cpu_T1, - bound, bound, cpu_T1); + s->T1, bound); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, s->T1, s->T1, + bound, bound, s->T1); tcg_temp_free(tcg_ctx, bound); } tcg_gen_movi_tl(tcg_ctx, s->A0, -1); - tcg_gen_shl_tl(tcg_ctx, s->A0, s->A0, cpu_T1); + tcg_gen_shl_tl(tcg_ctx, s->A0, s->A0, s->T1); tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, s->A0); gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); @@ -4408,10 +4387,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; #ifdef TARGET_X86_64 case MO_64: - tcg_gen_mulu2_i64(tcg_ctx, s->T0, cpu_T1, + tcg_gen_mulu2_i64(tcg_ctx, s->T0, s->T1, s->T0, cpu_regs[R_EDX]); tcg_gen_mov_i64(tcg_ctx, cpu_regs[s->vex_v], s->T0); - tcg_gen_mov_i64(tcg_ctx, cpu_regs[reg], cpu_T1); + tcg_gen_mov_i64(tcg_ctx, cpu_regs[reg], s->T1); break; #endif } @@ -4428,11 +4407,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Note that by zero-extending the mask operand, we automatically handle zero-extending the result. */ if (ot == MO_64) { - tcg_gen_mov_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_mov_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v]); } else { - tcg_gen_ext32u_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext32u_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v]); } - gen_helper_pdep(tcg_ctx, cpu_regs[reg], s->T0, cpu_T1); + gen_helper_pdep(tcg_ctx, cpu_regs[reg], s->T0, s->T1); break; case 0x2f5: /* pext Gy, By, Ey */ @@ -4446,11 +4425,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Note that by zero-extending the mask operand, we automatically handle zero-extending the result. */ if (ot == MO_64) { - tcg_gen_mov_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_mov_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v]); } else { - tcg_gen_ext32u_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext32u_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v]); } - gen_helper_pext(tcg_ctx, cpu_regs[reg], s->T0, cpu_T1); + gen_helper_pext(tcg_ctx, cpu_regs[reg], s->T0, s->T1); break; case 0x1f6: /* adcx Gy, Ey */ @@ -4542,22 +4521,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (ot == MO_64) { - tcg_gen_andi_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v], 63); + tcg_gen_andi_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v], 63); } else { - tcg_gen_andi_tl(tcg_ctx, cpu_T1, cpu_regs[s->vex_v], 31); + tcg_gen_andi_tl(tcg_ctx, s->T1, cpu_regs[s->vex_v], 31); } if (b == 0x1f7) { - tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); } else if (b == 0x2f7) { if (ot != MO_64) { tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); } - tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { if (ot != MO_64) { tcg_gen_ext32u_tl(tcg_ctx, s->T0, s->T0); } - tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); } gen_op_mov_reg_v(s, ot, reg, s->T0); break; @@ -4577,16 +4556,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, s->T0); switch (reg & 7) { case 1: /* blsr By,Ey */ - tcg_gen_subi_tl(tcg_ctx, cpu_T1, s->T0, 1); - tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); break; case 2: /* blsmsk By,Ey */ - tcg_gen_subi_tl(tcg_ctx, cpu_T1, s->T0, 1); - tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, 
cpu_T1); + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); + tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->T1); break; case 3: /* blsi By, Ey */ - tcg_gen_neg_tl(tcg_ctx, cpu_T1, s->T0); - tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_neg_tl(tcg_ctx, s->T1, s->T0); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); break; default: goto unknown_op; @@ -5001,7 +4980,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0; TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4; - TCGv cpu_T1 = tcg_ctx->cpu_T1; TCGv *cpu_regs = tcg_ctx->cpu_regs; TCGv *cpu_seg_base = tcg_ctx->cpu_seg_base; bool changed_cc_op = false; @@ -5274,7 +5252,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { opreg = rm; } - gen_op_mov_v_reg(s, ot, cpu_T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ @@ -5284,17 +5262,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { - gen_op_mov_v_reg(s, ot, cpu_T1, rm); + gen_op_mov_v_reg(s, ot, s->T1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op(s, op, ot, OR_EAX); break; } @@ -5339,7 +5317,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = (int8_t)insn_get(env, s, MO_8); break; } - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op(s, op, ot, opreg); } break; @@ -5381,7 +5359,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op) { case 0: /* test */ val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5445,22 +5423,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 4: /* mul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(s, MO_8, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); - tcg_gen_ext8u_tl(tcg_ctx, cpu_T1, cpu_T1); + tcg_gen_ext8u_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, s->T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(s, MO_16, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); - tcg_gen_ext16u_tl(tcg_ctx, cpu_T1, cpu_T1); + tcg_gen_ext16u_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, 16); @@ -5494,11 +5472,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 5: /* imul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(s, MO_8, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); - tcg_gen_ext8s_tl(tcg_ctx, cpu_T1, cpu_T1); + tcg_gen_ext8s_tl(tcg_ctx, s->T1, 
s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); tcg_gen_ext8s_tl(tcg_ctx, cpu_tmp0, s->T0); @@ -5506,11 +5484,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(s, MO_16, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); - tcg_gen_ext16s_tl(tcg_ctx, cpu_T1, cpu_T1); + tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, s->T0); @@ -5639,25 +5617,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); } next_eip = s->pc - s->cs_base; - tcg_gen_movi_tl(tcg_ctx, cpu_T1, next_eip); - gen_push_v(s, cpu_T1); + tcg_gen_movi_tl(tcg_ctx, s->T1, next_eip); + gen_push_v(s, s->T1); gen_op_jmp_v(s, s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 3: /* lcall Ev */ - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - gen_helper_lcall_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_lcall_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, s->T1, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - gen_helper_lcall_real(tcg_ctx, cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_lcall_real(tcg_ctx, cpu_env, cpu_tmp2_i32, s->T1, tcg_const_i32(tcg_ctx, dflag - 1), tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); } @@ -5673,17 +5651,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_jr(s, s->T0); break; case 5: /* ljmp Ev */ - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - gen_helper_ljmp_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_ljmp_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, s->T1, tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(s, R_CS); - gen_op_jmp_v(s, cpu_T1); + gen_op_jmp_v(s, s->T1); } tcg_gen_ld_tl(tcg_ctx, cpu_tmp4, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); @@ -5704,7 +5682,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_v_reg(s, ot, cpu_T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5715,7 +5693,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = insn_get(env, s, ot); gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5781,25 +5759,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); - 
tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); } else { - gen_op_mov_v_reg(s, ot, cpu_T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); } switch (ot) { #ifdef TARGET_X86_64 case MO_64: - tcg_gen_muls2_i64(tcg_ctx, cpu_regs[reg], cpu_T1, s->T0, cpu_T1); + tcg_gen_muls2_i64(tcg_ctx, cpu_regs[reg], s->T1, s->T0, s->T1); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, cpu_regs[reg]); tcg_gen_sari_tl(tcg_ctx, cpu_cc_src, cpu_cc_dst, 63); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, cpu_T1); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, s->T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, s->T1); tcg_gen_muls2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(tcg_ctx, cpu_regs[reg], cpu_tmp2_i32); @@ -5810,9 +5788,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; default: tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); - tcg_gen_ext16s_tl(tcg_ctx, cpu_T1, cpu_T1); + tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, s->T0); tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, s->T0, cpu_tmp0); @@ -5830,22 +5808,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(s, ot, s->T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(s, ot, cpu_T1, rm); - tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_T1); - gen_op_mov_reg_v(s, ot, reg, cpu_T1); + gen_op_mov_v_reg(s, ot, s->T1, rm); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); gen_op_mov_reg_v(s, ot, rm, s->T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_fetch_add_tl(tcg_ctx, cpu_T1, s->A0, s->T0, + tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); - tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); } else { - gen_op_ld_v(s, ot, cpu_T1, s->A0); - tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + gen_op_ld_v(s, ot, s->T1, s->A0); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); gen_op_st_v(s, ot, s->T0, s->A0); } - gen_op_mov_reg_v(s, ot, reg, cpu_T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); } gen_op_update2_cc(s); set_cc_op(s, CC_OP_ADDB + ot); @@ -6264,16 +6242,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(s, ot, s->T0, reg); - gen_op_mov_v_reg(s, ot, cpu_T1, rm); + gen_op_mov_v_reg(s, ot, s->T1, rm); gen_op_mov_reg_v(s, ot, rm, s->T0); - gen_op_mov_reg_v(s, ot, reg, cpu_T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); } else { gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(s, ot, s->T0, reg); /* for xchg, lock is implicit */ - tcg_gen_atomic_xchg_tl(tcg_ctx, cpu_T1, s->A0, s->T0, + tcg_gen_atomic_xchg_tl(tcg_ctx, s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); - gen_op_mov_reg_v(s, ot, reg, cpu_T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); } break; case 0xc4: /* les Gv */ @@ -6300,13 +6278,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); 
- gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, s->T0, s->A0); gen_movl_seg_T0(s, op); /* then put the data */ - gen_op_mov_reg_v(s, ot, reg, cpu_T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); @@ -6385,7 +6363,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { opreg = rm; } - gen_op_mov_v_reg(s, ot, cpu_T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); if (shift) { TCGv imm = tcg_const_tl(tcg_ctx, x86_ldub_code(env, s)); @@ -7004,8 +6982,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); - gen_helper_in_func(s, ot, cpu_T1, cpu_tmp2_i32); - gen_op_mov_reg_v(s, ot, R_EAX, cpu_T1); + gen_helper_in_func(s, ot, s->T1, cpu_tmp2_i32); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, cpu_tmp2_i32, ot); break; case 0xe6: @@ -7015,9 +6993,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_movi_tl(tcg_ctx, s->T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(s, ot, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, s->T1); gen_helper_out_func(s, ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); @@ -7029,8 +7007,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - gen_helper_in_func(s, ot, cpu_T1, cpu_tmp2_i32); - gen_op_mov_reg_v(s, ot, R_EAX, cpu_T1); + gen_helper_in_func(s, ot, s->T1, cpu_tmp2_i32); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, cpu_tmp2_i32, ot); break; case 0xee: @@ -7039,10 +7017,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(tcg_ctx, s->T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(s, ot, cpu_T1, R_EAX); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, s->T1); gen_helper_out_func(s, ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); break; @@ -7144,7 +7122,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(tcg_ctx, s->T0, selector); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, offset); + tcg_gen_movi_tl(tcg_ctx, s->T1, offset); } goto do_lcall; case 0xe9: /* jmp im */ @@ -7173,7 +7151,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(tcg_ctx, s->T0, selector); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, offset); + tcg_gen_movi_tl(tcg_ctx, s->T1, offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ @@ -7353,7 +7331,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } /* load shift */ val = x86_ldub_code(env, s); - tcg_gen_movi_tl(tcg_ctx, cpu_T1, val); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); if (op < 4) goto unknown_op; op -= 4; @@ -7375,12 +7353,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = 
((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(s, MO_32, cpu_T1, reg); + gen_op_mov_v_reg(s, MO_32, s->T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ - gen_exts(s, ot, cpu_T1); - tcg_gen_sari_tl(tcg_ctx, cpu_tmp0, cpu_T1, 3 + ot); + gen_exts(s, ot, s->T1); + tcg_gen_sari_tl(tcg_ctx, cpu_tmp0, s->T1, 3 + ot); tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a), cpu_tmp0); gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); @@ -7391,9 +7369,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(s, ot, s->T0, rm); } bt_op: - tcg_gen_andi_tl(tcg_ctx, cpu_T1, cpu_T1, (1 << (3 + ot)) - 1); + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, cpu_T1); + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, s->T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ @@ -7416,9 +7394,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, ot | MO_LE); break; } - tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, s->T0, cpu_T1); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, s->T0, s->T1); } else { - tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, s->T0, cpu_T1); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, s->T0, s->T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ @@ -7514,8 +7492,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. */ - tcg_gen_xori_tl(tcg_ctx, cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1); - tcg_gen_clz_tl(tcg_ctx, s->T0, s->T0, cpu_T1); + tcg_gen_xori_tl(tcg_ctx, s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1); + tcg_gen_clz_tl(tcg_ctx, s->T0, s->T0, s->T1); tcg_gen_xori_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(tcg_ctx, s->T0, s->T0, cpu_regs[reg]); @@ -8109,14 +8087,14 @@ case 0x101: } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, s->A0); + gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(tcg_ctx, s->T0, cpu_env, offsetof(CPUX86State, gdt.base)); - tcg_gen_st32_tl(tcg_ctx, cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit)); + tcg_gen_st32_tl(tcg_ctx, s->T1, cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ @@ -8126,14 +8104,14 @@ case 0x101: } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, s->A0); + gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(tcg_ctx, s->T0, cpu_env, offsetof(CPUX86State, idt.base)); - tcg_gen_st32_tl(tcg_ctx, cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit)); + tcg_gen_st32_tl(tcg_ctx, s->T1, cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ @@ -9166,7 +9144,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->prev_pc = dcbase->pc_first; dc->T0 = tcg_temp_new(tcg_ctx); - tcg_ctx->cpu_T1 = tcg_temp_new(tcg_ctx); + dc->T1 = tcg_temp_new(tcg_ctx); dc->A0 = 
tcg_temp_new(tcg_ctx); diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h index 741615ff..5336f872 100644 --- a/qemu/tcg/tcg.h +++ b/qemu/tcg/tcg.h @@ -826,9 +826,6 @@ struct TCGContext { TCGv cpu_cc_src; TCGv cpu_cc_src2; - /* qemu/target-i386/translate.c: local temps */ - TCGv cpu_T1; - /* qemu/target-i386/translate.c: local register indexes (only used inside old micro ops) */ TCGv cpu_tmp0, cpu_tmp4; TCGv_ptr cpu_ptr0, cpu_ptr1;
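
Editorial note: the change above is purely mechanical. The `TCGv cpu_T1` temporary moves from the shared TCGContext into the per-translation DisasContext, the `TCGv cpu_T1 = tcg_ctx->cpu_T1;` load at the top of each helper disappears, and every use becomes `s->T1`, allocated in i386_tr_init_disas_context alongside dc->T0 and dc->A0. The sketch below illustrates that pattern under stated assumptions: shared_ctx_t, disas_t, and TCGv_stub are hypothetical stand-ins for TCGContext, DisasContext, and TCGv, not the real QEMU/Unicorn API.

/*
 * Minimal sketch of the refactoring pattern: move a scratch value from
 * a context shared by all users into a per-instance struct.
 * All names here are illustrative stand-ins, not QEMU's types.
 */
#include <stdio.h>

typedef int TCGv_stub;                 /* stand-in for TCGv */

typedef struct shared_ctx_t {
    /* Before the patch, cpu_T1 lived here, visible to every
     * translation that used this context. */
    TCGv_stub cpu_T1;
} shared_ctx_t;

typedef struct disas_t {
    shared_ctx_t *ctx;                 /* still reachable for true globals */
    TCGv_stub T0;
    TCGv_stub T1;                      /* after the patch: per-instance */
} disas_t;

/* Old style: every helper re-fetches the temp from the shared context. */
static void gen_op_old(shared_ctx_t *tcg_ctx) {
    TCGv_stub cpu_T1 = tcg_ctx->cpu_T1;   /* boilerplate in each helper */
    printf("old: T1 = %d\n", cpu_T1);
}

/* New style: the temp travels with the DisasContext-like struct, so the
 * per-function alias disappears and helpers just use s->T1. */
static void gen_op_new(disas_t *s) {
    printf("new: T1 = %d\n", s->T1);
}

int main(void) {
    shared_ctx_t shared = { .cpu_T1 = 1 };
    disas_t a = { .ctx = &shared, .T0 = 0, .T1 = 2 };
    disas_t b = { .ctx = &shared, .T0 = 0, .T1 = 3 };

    gen_op_old(&shared);   /* every caller sees the same cpu_T1 */
    gen_op_new(&a);        /* a and b keep independent T1 values */
    gen_op_new(&b);
    return 0;
}

In upstream QEMU this commit sits in a series that moved translate.c's scratch temporaries (T0 in the preceding backport, T1 here) into DisasContext so translation state is carried per instance rather than through shared mutable context fields; as a side effect it also deletes the repeated local aliasing visible in the removed lines of each hunk.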