diff --git a/qemu/target/i386/translate.c b/qemu/target/i386/translate.c
index 561dadf9..010efdc8 100644
--- a/qemu/target/i386/translate.c
+++ b/qemu/target/i386/translate.c
@@ -116,6 +116,7 @@ typedef struct DisasContext {
     /* TCG local temps */
     TCGv cc_srcT;
+    TCGv A0;
 
     sigjmp_buf jmpbuf;
     struct uc_struct *uc;
 
@@ -459,11 +460,10 @@ static inline void gen_op_mov_v_reg(TCGContext *s, TCGMemOp ot, TCGv t0, int reg
 static void gen_add_A0_im(DisasContext *s, int val)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
 
-    tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_A0, val);
+    tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, val);
     if (!CODE64(s)) {
-        tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0);
+        tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0);
     }
 }
 
@@ -508,11 +508,10 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
 
     if (d == OR_TMP0) {
-        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
+        gen_op_st_v(s, idx, cpu_T0, s->A0);
     } else {
         gen_op_mov_reg_v(tcg_ctx, idx, d, cpu_T0);
     }
@@ -534,13 +533,12 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                           int def_seg, int ovr_seg)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
 
     switch (aflag) {
 #ifdef TARGET_X86_64
     case MO_64:
         if (ovr_seg < 0) {
-            tcg_gen_mov_tl(tcg_ctx, cpu_A0, a0);
+            tcg_gen_mov_tl(tcg_ctx, s->A0, a0);
             return;
         }
         break;
@@ -551,14 +549,14 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
             ovr_seg = def_seg;
         }
         if (ovr_seg < 0) {
-            tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, a0);
+            tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0);
             return;
         }
         break;
     case MO_16:
         /* 16 bit address */
-        tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, a0);
-        a0 = cpu_A0;
+        tcg_gen_ext16u_tl(tcg_ctx, s->A0, a0);
+        a0 = s->A0;
         if (ovr_seg < 0) {
             if (s->addseg) {
                 ovr_seg = def_seg;
@@ -576,13 +574,13 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
         TCGv seg = cpu_seg_base[ovr_seg];
 
         if (aflag == MO_64) {
-            tcg_gen_add_tl(tcg_ctx, cpu_A0, a0, seg);
+            tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg);
         } else if (CODE64(s)) {
-            tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, a0);
-            tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, seg);
+            tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0);
+            tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, seg);
         } else {
-            tcg_gen_add_tl(tcg_ctx, cpu_A0, a0, seg);
-            tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0);
+            tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg);
+            tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0);
         }
     }
 }
@@ -748,13 +746,12 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
 
     gen_string_movl_A0_ESI(s);
-    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_ld_v(s, ot, cpu_T0, s->A0);
     gen_string_movl_A0_EDI(s);
-    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_st_v(s, ot, cpu_T0, s->A0);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI);
@@ -1234,12 +1231,11 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
 
     gen_op_mov_v_reg(tcg_ctx, MO_32, cpu_T0, R_EAX);
     gen_string_movl_A0_EDI(s);
-    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_st_v(s, ot, cpu_T0, s->A0);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI);
 }
@@ -1247,11 +1243,10 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot)
 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
 
     gen_string_movl_A0_ESI(s);
-    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_ld_v(s, ot, cpu_T0, s->A0);
     gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, cpu_T0);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI);
@@ -1260,11 +1255,10 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot)
 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
 
     gen_string_movl_A0_EDI(s);
-    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+    gen_op_ld_v(s, ot, cpu_T1, s->A0);
     gen_op(s, OP_CMPL, ot, R_EAX);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI);
@@ -1273,11 +1267,10 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot)
 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
 
     gen_string_movl_A0_EDI(s);
-    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+    gen_op_ld_v(s, ot, cpu_T1, s->A0);
     gen_string_movl_A0_ESI(s);
     gen_op(s, OP_CMPL, ot, OR_TMP0);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
@@ -1303,7 +1296,6 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
@@ -1311,11 +1303,11 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
     /* Note: we must do this dummy write first to be restartable in
        case of page fault. */
     tcg_gen_movi_tl(tcg_ctx, cpu_T0, 0);
-    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_st_v(s, ot, cpu_T0, s->A0);
     tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_regs[R_EDX]);
     tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
     gen_helper_in_func(tcg_ctx, ot, cpu_T0, cpu_tmp2_i32);
-    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_st_v(s, ot, cpu_T0, s->A0);
     gen_op_movl_T0_Dshift(tcg_ctx, ot);
     gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI);
     gen_bpt_io(s, cpu_tmp2_i32, ot);
@@ -1326,12 +1318,11 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
     TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
     gen_string_movl_A0_ESI(s);
-    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+    gen_op_ld_v(s, ot, cpu_T0, s->A0);
 
     tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_regs[R_EDX]);
     tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
@@ -1447,7 +1438,6 @@ static void gen_helper_fp_arith_STN_ST0(TCGContext *s, int op, int opreg)
 static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src = tcg_ctx->cpu_cc_src;
     TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4;
@@ -1457,13 +1447,13 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
     if (d != OR_TMP0) {
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, d);
     } else if (!(s->prefix & PREFIX_LOCK)) {
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     }
     switch(op) {
     case OP_ADCL:
         if (s->prefix & PREFIX_LOCK) {
             tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_tmp4, cpu_T1);
-            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T0,
+            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T0,
                                         s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1477,7 +1467,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
         if (s->prefix & PREFIX_LOCK) {
             tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_T1, cpu_tmp4);
             tcg_gen_neg_tl(tcg_ctx, cpu_T0, cpu_T0);
-            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T0,
+            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T0,
                                         s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_sub_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1489,7 +1479,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
         break;
     case OP_ADDL:
         if (s->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T1,
+            tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T1,
                                         s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1501,7 +1491,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
     case OP_SUBL:
         if (s->prefix & PREFIX_LOCK) {
             tcg_gen_neg_tl(tcg_ctx, cpu_T0, cpu_T1);
-            tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->cc_srcT, cpu_A0, cpu_T0,
+            tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->cc_srcT, s->A0, cpu_T0,
                                         s->mem_index, ot | MO_LE);
             tcg_gen_sub_tl(tcg_ctx, cpu_T0, s->cc_srcT, cpu_T1);
         } else {
@@ -1515,7 +1505,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
     default:
     case OP_ANDL:
         if (s->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_and_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T1,
+            tcg_gen_atomic_and_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T1,
                                         s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_and_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1526,7 +1516,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
         break;
     case OP_ORL:
         if (s->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_or_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T1,
+            tcg_gen_atomic_or_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T1,
                                        s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_or_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1537,7 +1527,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
         break;
     case OP_XORL:
         if (s->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_xor_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T1,
+            tcg_gen_atomic_xor_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T1,
                                         s->mem_index, ot | MO_LE);
         } else {
             tcg_gen_xor_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
@@ -1559,20 +1549,19 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d)
 static void gen_inc(DisasContext *s, TCGMemOp ot, int d, int c)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src = tcg_ctx->cpu_cc_src;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
 
     if (s->prefix & PREFIX_LOCK) {
         tcg_gen_movi_tl(tcg_ctx, cpu_T0, c > 0 ? 1 : -1);
-        tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T0,
+        tcg_gen_atomic_add_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T0,
                                     s->mem_index, ot | MO_LE);
     } else {
         if (d != OR_TMP0) {
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, d);
         } else {
-            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, ot, cpu_T0, s->A0);
         }
         tcg_gen_addi_tl(tcg_ctx, cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
         gen_op_st_rm_T0_A0(s, ot, d);
@@ -1639,14 +1628,13 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
 {
     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
 
     /* load */
     if (op1 == OR_TMP0) {
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     } else {
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
     }
@@ -1680,7 +1668,6 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
 {
     int mask = (ot == MO_64 ? 0x3f : 0x1f);
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src = tcg_ctx->cpu_cc_src;
     TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4;
@@ -1688,7 +1675,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
 
     /* load */
     if (op1 == OR_TMP0)
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     else
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
 
@@ -1729,7 +1716,6 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
     TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
     TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32;
     TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
@@ -1737,7 +1723,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
 
     /* load */
     if (op1 == OR_TMP0) {
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     } else {
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
     }
@@ -1821,7 +1807,6 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
     int mask = (ot == MO_64 ? 0x3f : 0x1f);
     int shift;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
@@ -1833,7 +1818,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
 
     /* load */
     if (op1 == OR_TMP0) {
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     } else {
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
     }
@@ -1907,7 +1892,6 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
 
@@ -1916,7 +1900,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
 
     /* load */
     if (op1 == OR_TMP0)
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     else
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
 
@@ -1970,7 +1954,6 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
     target_ulong mask = (ot == MO_64 ? 63 : 31);
     TCGv count;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
     TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
@@ -1978,7 +1961,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
 
     /* load */
     if (op1 == OR_TMP0) {
-        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T0, s->A0);
     } else {
         gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, op1);
     }
@@ -2308,7 +2291,6 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
     TCGv ea = NULL;
 
@@ -2316,22 +2298,22 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a)
         if (a.scale == 0) {
             ea = cpu_regs[a.index];
         } else {
-            tcg_gen_shli_tl(tcg_ctx, cpu_A0, cpu_regs[a.index], a.scale);
-            ea = cpu_A0;
+            tcg_gen_shli_tl(tcg_ctx, s->A0, cpu_regs[a.index], a.scale);
+            ea = s->A0;
         }
         if (a.base >= 0) {
-            tcg_gen_add_tl(tcg_ctx, cpu_A0, ea, cpu_regs[a.base]);
-            ea = cpu_A0;
+            tcg_gen_add_tl(tcg_ctx, s->A0, ea, cpu_regs[a.base]);
+            ea = s->A0;
         }
     } else if (a.base >= 0) {
         ea = cpu_regs[a.base];
     }
     if (!ea) {
-        tcg_gen_movi_tl(tcg_ctx, cpu_A0, a.disp);
-        ea = cpu_A0;
+        tcg_gen_movi_tl(tcg_ctx, s->A0, a.disp);
+        ea = s->A0;
     } else if (a.disp != 0) {
-        tcg_gen_addi_tl(tcg_ctx, cpu_A0, ea, a.disp);
-        ea = cpu_A0;
+        tcg_gen_addi_tl(tcg_ctx, s->A0, ea, a.disp);
+        ea = s->A0;
     }
 
     return ea;
@@ -2371,10 +2353,7 @@ static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
 /* used for LEA and MOV AX, mem */
 static void gen_add_A0_ds_seg(DisasContext *s)
 {
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
-
-    gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
+    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
 }
 
 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
@@ -2384,10 +2363,8 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
 {
     int mod, rm;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
-
     mod = (modrm >> 6) & 3;
     rm = (modrm & 7) | REX_B(s);
 
     if (mod == 3) {
@@ -2405,9 +2382,9 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
         if (is_store) {
             if (reg != OR_TMP0)
                 gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, reg);
-            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+            gen_op_st_v(s, ot, cpu_T0, s->A0);
         } else {
-            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, ot, cpu_T0, s->A0);
             if (reg != OR_TMP0)
                 gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T0);
         }
@@ -2620,22 +2597,21 @@ static void gen_push_v(DisasContext *s, TCGv val)
     TCGMemOp a_ot = mo_stacksize(s);
     int size = 1 << d_ot;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp4 = tcg_ctx->cpu_tmp4;
-    TCGv new_esp = cpu_A0;
+    TCGv new_esp = s->A0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
-    tcg_gen_subi_tl(tcg_ctx, cpu_A0, cpu_regs[R_ESP], size);
+    tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_regs[R_ESP], size);
 
     if (!CODE64(s)) {
        if (s->addseg) {
             new_esp = cpu_tmp4;
-            tcg_gen_mov_tl(tcg_ctx, new_esp, cpu_A0);
+            tcg_gen_mov_tl(tcg_ctx, new_esp, s->A0);
         }
-        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
     }
 
-    gen_op_st_v(s, d_ot, val, cpu_A0);
+    gen_op_st_v(s, d_ot, val, s->A0);
     gen_op_mov_reg_v(tcg_ctx, a_ot, R_ESP, new_esp);
 }
 
@@ -2644,12 +2620,11 @@ static TCGMemOp gen_pop_T0(DisasContext *s)
 {
     TCGMemOp d_ot = mo_pushpop(s, s->dflag);
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
     gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
-    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+    gen_op_ld_v(s, d_ot, cpu_T0, s->A0);
 
     return d_ot;
 }
@@ -2674,13 +2649,12 @@ static void gen_pusha(DisasContext *s)
     int size = 1 << d_ot;
     int i;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
     for (i = 0; i < 8; i++) {
-        tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
-        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
-        gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
+        tcg_gen_addi_tl(tcg_ctx, s->A0, cpu_regs[R_ESP], (i - 8) * size);
+        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
+        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
     }
 
     gen_stack_update(s, -8 * size);
@@ -2693,7 +2667,6 @@ static void gen_popa(DisasContext *s)
     int size = 1 << d_ot;
     int i;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
@@ -2702,9 +2675,9 @@ static void gen_popa(DisasContext *s)
         if (7 - i == R_ESP) {
             continue;
         }
-        tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_regs[R_ESP], i * size);
-        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
-        gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+        tcg_gen_addi_tl(tcg_ctx, s->A0, cpu_regs[R_ESP], i * size);
+        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
+        gen_op_ld_v(s, d_ot, cpu_T0, s->A0);
         gen_op_mov_reg_v(tcg_ctx, d_ot, 7 - i, cpu_T0);
     }
 
@@ -2717,7 +2690,6 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
     TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
     int size = 1 << d_ot;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
@@ -2725,7 +2697,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
     /* Push BP; compute FrameTemp into T1. */
     tcg_gen_subi_tl(tcg_ctx, cpu_T1, cpu_regs[R_ESP], size);
     gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
-    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
+    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
 
     level &= 31;
     if (level != 0) {
@@ -2733,19 +2705,19 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
 
         /* Copy level-1 pointers from the previous frame. */
         for (i = 1; i < level; ++i) {
-            tcg_gen_subi_tl(tcg_ctx, cpu_A0, cpu_regs[R_EBP], size * i);
-            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
-            gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
+            tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_regs[R_EBP], size * i);
+            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+            gen_op_ld_v(s, d_ot, cpu_tmp0, s->A0);
 
-            tcg_gen_subi_tl(tcg_ctx, cpu_A0, cpu_T1, size * i);
-            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
-            gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
+            tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_T1, size * i);
+            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+            gen_op_st_v(s, d_ot, cpu_tmp0, s->A0);
         }
 
         /* Push the current FrameTemp as the last level. */
-        tcg_gen_subi_tl(tcg_ctx, cpu_A0, cpu_T1, size * level);
-        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
-        gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
+        tcg_gen_subi_tl(tcg_ctx, s->A0, cpu_T1, size * level);
+        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+        gen_op_st_v(s, d_ot, cpu_T1, s->A0);
     }
 
     /* Copy the FrameTemp value to EBP. */
@@ -2761,13 +2733,12 @@ static void gen_leave(DisasContext *s)
     TCGMemOp d_ot = mo_pushpop(s, s->dflag);
     TCGMemOp a_ot = mo_stacksize(s);
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_T0 = tcg_ctx->cpu_T0;
     TCGv cpu_T1 = tcg_ctx->cpu_T1;
     TCGv *cpu_regs = tcg_ctx->cpu_regs;
 
     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
-    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+    gen_op_ld_v(s, d_ot, cpu_T0, s->A0);
 
     tcg_gen_addi_tl(tcg_ctx, cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
 
@@ -2964,9 +2935,8 @@ static inline void gen_ldq_env_A0(DisasContext *s, int offset)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
 
-    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ);
     tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset);
 }
 
@@ -2974,10 +2944,9 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
 
     tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset);
-    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ);
 }
 
 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
@@ -2985,12 +2954,11 @@ static inline void gen_ldo_env_A0(DisasContext *s, int offset)
     int mem_index = s->mem_index;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
 
-    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
+    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0, mem_index, MO_LEQ);
     tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8);
+    tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, s->A0, 8);
     tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
     tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
 }
@@ -3000,12 +2968,11 @@ static inline void gen_sto_env_A0(DisasContext *s, int offset)
     int mem_index = s->mem_index;
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
 
     tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
-    tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8);
+    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0, mem_index, MO_LEQ);
+    tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, s->A0, 8);
     tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
     tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
 }
@@ -3579,7 +3546,6 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
     TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
     TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src = tcg_ctx->cpu_cc_src;
     TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2;
@@ -3686,7 +3652,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         } else {
             tcg_gen_ld32u_tl(tcg_ctx, cpu_T0, cpu_env,
                              offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)));
-            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+            gen_op_st_v(s, MO_32, cpu_T0, s->A0);
         }
         break;
     case 0x6e: /* movd mm, ea */
@@ -3751,7 +3717,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x210: /* movss xmm, ea */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, MO_32, cpu_T0, s->A0);
             tcg_gen_st32_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
             tcg_gen_movi_tl(tcg_ctx, cpu_T0, 0);
             tcg_gen_st32_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
@@ -3938,7 +3904,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
             tcg_gen_ld32u_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
-            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+            gen_op_st_v(s, MO_32, cpu_T0, s->A0);
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
@@ -4113,7 +4079,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
             if ((b >> 8) & 1) {
                 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
             } else {
-                gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, MO_32, cpu_T0, s->A0);
                 tcg_gen_st32_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
             }
             op2_offset = offsetof(CPUX86State,xmm_t0);
@@ -4252,13 +4218,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 break;
             case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
             case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
-                tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                     s->mem_index, MO_LEUL);
                 tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, op2_offset +
                                offsetof(ZMMReg, ZMM_L(0)));
                 break;
             case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
-                tcg_gen_qemu_ld_tl(s->uc, cpu_tmp0, cpu_A0,
+                tcg_gen_qemu_ld_tl(s->uc, cpu_tmp0, s->A0,
                                    s->mem_index, MO_LEUW);
                 tcg_gen_st16_tl(tcg_ctx, cpu_tmp0, cpu_env, op2_offset +
                                 offsetof(ZMMReg, ZMM_W(0)));
@@ -4347,11 +4313,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
 
             gen_lea_modrm(env, s, modrm);
             if ((b & 1) == 0) {
-                tcg_gen_qemu_ld_tl(s->uc, cpu_T0, cpu_A0,
+                tcg_gen_qemu_ld_tl(s->uc, cpu_T0, s->A0,
                                    s->mem_index, ot | MO_BE);
                 gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T0);
             } else {
-                tcg_gen_qemu_st_tl(s->uc, cpu_regs[reg], cpu_A0,
+                tcg_gen_qemu_st_tl(s->uc, cpu_regs[reg], s->A0,
                                    s->mem_index, ot | MO_BE);
             }
             break;
@@ -4383,23 +4349,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
             /* Extract START, and shift the operand.
               Shifts larger than operand size get zeros. */
-            tcg_gen_ext8u_tl(tcg_ctx, cpu_A0, cpu_regs[s->vex_v]);
-            tcg_gen_shr_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_A0);
+            tcg_gen_ext8u_tl(tcg_ctx, s->A0, cpu_regs[s->vex_v]);
+            tcg_gen_shr_tl(tcg_ctx, cpu_T0, cpu_T0, s->A0);
 
             bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31);
             zero = tcg_const_tl(tcg_ctx, 0);
-            tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, cpu_T0, cpu_A0, bound,
+            tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, cpu_T0, s->A0, bound,
                                cpu_T0, zero);
             tcg_temp_free(tcg_ctx, zero);
 
             /* Extract the LEN into a mask.  Lengths larger than
               operand size get all ones. */
-            tcg_gen_extract_tl(tcg_ctx, cpu_A0, cpu_regs[s->vex_v], 8, 8);
-            tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, cpu_A0, cpu_A0, bound,
-                               cpu_A0, bound);
+            tcg_gen_extract_tl(tcg_ctx, s->A0, cpu_regs[s->vex_v], 8, 8);
+            tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->A0, s->A0, bound,
+                               s->A0, bound);
             tcg_temp_free(tcg_ctx, bound);
 
             tcg_gen_movi_tl(tcg_ctx, cpu_T1, 1);
-            tcg_gen_shl_tl(tcg_ctx, cpu_T1, cpu_T1, cpu_A0);
+            tcg_gen_shl_tl(tcg_ctx, cpu_T1, cpu_T1, s->A0);
             tcg_gen_subi_tl(tcg_ctx, cpu_T1, cpu_T1, 1);
             tcg_gen_and_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
 
@@ -4428,9 +4394,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                                    bound, bound, cpu_T1);
                 tcg_temp_free(tcg_ctx, bound);
             }
-            tcg_gen_movi_tl(tcg_ctx, cpu_A0, -1);
-            tcg_gen_shl_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_T1);
-            tcg_gen_andc_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_A0);
+            tcg_gen_movi_tl(tcg_ctx, s->A0, -1);
+            tcg_gen_shl_tl(tcg_ctx, s->A0, s->A0, cpu_T1);
+            tcg_gen_andc_tl(tcg_ctx, cpu_T0, cpu_T0, s->A0);
             gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T0);
             gen_op_update1_cc(tcg_ctx);
             set_cc_op(s, CC_OP_BMILGB + ot);
@@ -4682,7 +4648,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
                 } else {
-                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, cpu_A0,
+                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, s->A0,
                                        s->mem_index, MO_UB);
                 }
                 break;
@@ -4692,7 +4658,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
                 } else {
-                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, cpu_A0,
+                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, s->A0,
                                        s->mem_index, MO_LEUW);
                 }
                 break;
@@ -4704,7 +4670,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                     if (mod == 3) {
                         tcg_gen_extu_i32_tl(tcg_ctx, cpu_regs[rm], cpu_tmp2_i32);
                     } else {
-                        tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                        tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                             s->mem_index, MO_LEUL);
                     }
                 } else { /* pextrq */
@@ -4715,7 +4681,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                     if (mod == 3) {
                         tcg_gen_mov_i64(tcg_ctx, cpu_regs[rm], cpu_tmp1_i64);
                     } else {
-                        tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                        tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0,
                                             s->mem_index, MO_LEQ);
                     }
 #else
@@ -4729,7 +4695,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
                 } else {
-                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, cpu_A0,
+                    tcg_gen_qemu_st_tl(s->uc, cpu_T0, s->A0,
                                        s->mem_index, MO_LEUL);
                 }
                 break;
@@ -4737,7 +4703,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     gen_op_mov_v_reg(tcg_ctx, MO_32, cpu_T0, rm);
                 } else {
-                    tcg_gen_qemu_ld_tl(s->uc, cpu_T0, cpu_A0,
+                    tcg_gen_qemu_ld_tl(s->uc, cpu_T0, s->A0,
                                        s->mem_index, MO_UB);
                 }
                 tcg_gen_st8_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State,
@@ -4749,7 +4715,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                                     offsetof(CPUX86State,xmm_regs[rm]
                                              .ZMM_L((val >> 6) & 3)));
                 } else {
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                 }
                 tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env,
@@ -4777,7 +4743,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_regs[rm]);
                 } else {
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                 }
                 tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env,
@@ -4788,7 +4754,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 if (mod == 3) {
                     gen_op_mov_v_reg(tcg_ctx, ot, cpu_tmp1_i64, rm);
                 } else {
-                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0,
                                         s->mem_index, MO_LEQ);
                 }
                 tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, cpu_env,
@@ -4915,7 +4881,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 switch (sz) {
                 case 2: /* 32 bit access */
-                    gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+                    gen_op_ld_v(s, MO_32, cpu_T0, s->A0);
                     tcg_gen_st32_tl(tcg_ctx, cpu_T0, cpu_env,
                                     offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
                     break;
@@ -4981,15 +4947,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 /* maskmov : we must prepare A0 */
                 if (mod != 3)
                     goto illegal_op;
-                tcg_gen_mov_tl(tcg_ctx, cpu_A0, cpu_regs[R_EDI]);
-                gen_extu(tcg_ctx, s->aflag, cpu_A0);
+                tcg_gen_mov_tl(tcg_ctx, s->A0, cpu_regs[R_EDI]);
+                gen_extu(tcg_ctx, s->aflag, s->A0);
                 gen_add_A0_ds_seg(s);
 
                 tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset);
                 tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset);
                 /* XXX: introduce a new table? */
                 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
-                sse_fn_eppt(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
+                sse_fn_eppt(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, s->A0);
                 break;
             default:
                 tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset);
@@ -5044,7 +5010,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
     TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32;
     TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64;
-    TCGv cpu_A0 = tcg_ctx->cpu_A0;
    TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst;
     TCGv cpu_cc_src = tcg_ctx->cpu_cc_src;
     TCGv cpu_tmp0 = tcg_ctx->cpu_tmp0;
@@ -5333,7 +5298,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 rm = (modrm & 7) | REX_B(s);
                 if (mod != 3) {
                     gen_lea_modrm(env, s, modrm);
-                    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+                    gen_op_ld_v(s, ot, cpu_T1, s->A0);
                 } else if (op == OP_XORL && rm == reg) {
                     goto xor_zero;
                 } else {
@@ -5421,7 +5386,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             /* For those below that handle locked memory, don't load here.  */
             if (!(s->prefix & PREFIX_LOCK) || op != 2) {
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
             }
         } else {
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, rm);
        }
@@ -5440,12 +5405,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                     goto illegal_op;
                 }
                 tcg_gen_movi_tl(tcg_ctx, cpu_T0, ~0);
-                tcg_gen_atomic_xor_fetch_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_T0,
+                tcg_gen_atomic_xor_fetch_tl(tcg_ctx, cpu_T0, s->A0, cpu_T0,
                                             s->mem_index, ot | MO_LE);
             } else {
                 tcg_gen_not_tl(tcg_ctx, cpu_T0, cpu_T0);
                 if (mod != 3) {
-                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                    gen_op_st_v(s, ot, cpu_T0, s->A0);
                 } else {
                     gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
                 }
@@ -5463,7 +5428,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 t0 = tcg_temp_local_new(tcg_ctx);
                 label1 = gen_new_label(tcg_ctx);
 
-                tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0);
+                tcg_gen_mov_tl(tcg_ctx, a0, s->A0);
                 tcg_gen_mov_tl(tcg_ctx, t0, cpu_T0);
 
                 gen_set_label(tcg_ctx, label1);
@@ -5483,7 +5448,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             } else {
                 tcg_gen_neg_tl(tcg_ctx, cpu_T0, cpu_T0);
                 if (mod != 3) {
-                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                    gen_op_st_v(s, ot, cpu_T0, s->A0);
                 } else {
                     gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
                 }
@@ -5662,7 +5627,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
             if (op >= 2 && op != 3 && op != 5)
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
         } else {
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, rm);
         }
@@ -5695,9 +5660,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_jr(s, cpu_T0);
             break;
         case 3: /* lcall Ev */
-            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+            gen_op_ld_v(s, ot, cpu_T1, s->A0);
             gen_add_A0_im(s, 1 << ot);
-            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
         do_lcall:
             if (s->pe && !s->vm86) {
                 tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_T0);
@@ -5722,9 +5687,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_jr(s, cpu_T0);
             break;
         case 5: /* ljmp Ev */
-            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+            gen_op_ld_v(s, ot, cpu_T1, s->A0);
             gen_add_A0_im(s, 1 << ot);
-            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
         do_ljmp:
             if (s->pe && !s->vm86) {
                 tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_T0);
@@ -5886,13 +5851,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         } else {
             gen_lea_modrm(env, s, modrm);
             if (s->prefix & PREFIX_LOCK) {
-                tcg_gen_atomic_fetch_add_tl(tcg_ctx, cpu_T1, cpu_A0, cpu_T0,
+                tcg_gen_atomic_fetch_add_tl(tcg_ctx, cpu_T1, s->A0, cpu_T0,
                                             s->mem_index, ot | MO_LE);
                 tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
             } else {
-                gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T1, s->A0);
                 tcg_gen_add_tl(tcg_ctx, cpu_T0, cpu_T0, cpu_T1);
-                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_st_v(s, ot, cpu_T0, s->A0);
             }
             gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T1);
         }
@@ -5918,7 +5883,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             if (mod == 3) {
                 goto illegal_op;
             }
-            tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, cpu_A0, cmpv, newv,
+            tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, s->A0, cmpv, newv,
                                       s->mem_index, ot | MO_LE);
             gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, oldv);
         } else {
@@ -5927,7 +5892,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 gen_op_mov_v_reg(tcg_ctx, ot, oldv, rm);
             } else {
                 gen_lea_modrm(env, s, modrm);
-                gen_op_ld_v(s, ot, oldv, cpu_A0);
+                gen_op_ld_v(s, ot, oldv, s->A0);
                 rm = 0; /* avoid warning */
             }
             gen_extu(tcg_ctx, ot, oldv);
@@ -5942,7 +5907,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                    must be before changing accumulator to ensure
                    idempotency if the store faults and the instruction
                    is restarted */
-                gen_op_st_v(s, ot, newv, cpu_A0);
+                gen_op_st_v(s, ot, newv, s->A0);
                 gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, oldv);
             }
         }
@@ -5966,9 +5931,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 goto illegal_op;
             gen_lea_modrm(env, s, modrm);
             if ((s->prefix & PREFIX_LOCK) && tcg_ctx->uc->parallel_cpus) {
-                gen_helper_cmpxchg16b(tcg_ctx, cpu_env, cpu_A0);
+                gen_helper_cmpxchg16b(tcg_ctx, cpu_env, s->A0);
             } else {
-                gen_helper_cmpxchg16b_unlocked(tcg_ctx, cpu_env, cpu_A0);
+                gen_helper_cmpxchg16b_unlocked(tcg_ctx, cpu_env, s->A0);
             }
         } else
 #endif
@@ -5977,9 +5942,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 goto illegal_op;
             gen_lea_modrm(env, s, modrm);
             if ((s->prefix & PREFIX_LOCK) && tcg_ctx->uc->parallel_cpus) {
-                gen_helper_cmpxchg8b(tcg_ctx, cpu_env, cpu_A0);
+                gen_helper_cmpxchg8b(tcg_ctx, cpu_env, s->A0);
             } else {
-                gen_helper_cmpxchg8b_unlocked(tcg_ctx, cpu_env, cpu_A0);
+                gen_helper_cmpxchg8b_unlocked(tcg_ctx, cpu_env, s->A0);
             }
         }
         set_cc_op(s, CC_OP_EFLAGS);
@@ -6124,7 +6089,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         val = insn_get(env, s, ot);
         tcg_gen_movi_tl(tcg_ctx, cpu_T0, val);
         if (mod != 3) {
-            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+            gen_op_st_v(s, ot, cpu_T0, s->A0);
         } else {
            gen_op_mov_reg_v(tcg_ctx, ot, (modrm & 7) | REX_B(s), cpu_T0);
         }
@@ -6211,7 +6176,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_op_mov_reg_v(tcg_ctx, d_ot, reg, cpu_T0);
         } else {
             gen_lea_modrm(env, s, modrm);
-            gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, s_ot, cpu_T0, s->A0);
             gen_op_mov_reg_v(tcg_ctx, d_ot, reg, cpu_T0);
         }
     }
@@ -6227,7 +6192,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             AddressParts a = gen_lea_modrm_0(env, s, modrm);
             TCGv ea = gen_lea_modrm_1(s, a);
             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
-            gen_op_mov_reg_v(tcg_ctx, dflag, reg, cpu_A0);
+            gen_op_mov_reg_v(tcg_ctx, dflag, reg, s->A0);
         }
         break;
 
@@ -6249,24 +6214,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 offset_addr = insn_get(env, s, s->aflag);
                 break;
             }
-            tcg_gen_movi_tl(tcg_ctx, cpu_A0, offset_addr);
+            tcg_gen_movi_tl(tcg_ctx, s->A0, offset_addr);
             gen_add_A0_ds_seg(s);
             if ((b & 2) == 0) {
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
                 gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, cpu_T0);
             } else {
                 gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, R_EAX);
-                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_st_v(s, ot, cpu_T0, s->A0);
             }
         }
         break;
     case 0xd7: /* xlat */
-        tcg_gen_mov_tl(tcg_ctx, cpu_A0, cpu_regs[R_EBX]);
+        tcg_gen_mov_tl(tcg_ctx, s->A0, cpu_regs[R_EBX]);
         tcg_gen_ext8u_tl(tcg_ctx, cpu_T0, cpu_regs[R_EAX]);
-        tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_T0);
-        gen_extu(tcg_ctx, s->aflag, cpu_A0);
+        tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, cpu_T0);
+        gen_extu(tcg_ctx, s->aflag, s->A0);
         gen_add_A0_ds_seg(s);
-        gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, MO_8, cpu_T0, s->A0);
         gen_op_mov_reg_v(tcg_ctx, MO_8, R_EAX, cpu_T0);
         break;
     case 0xb0: case 0xb1: case 0xb2: case 0xb3:
@@ -6320,7 +6285,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_lea_modrm(env, s, modrm);
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, reg);
             /* for xchg, lock is implicit */
-            tcg_gen_atomic_xchg_tl(tcg_ctx, cpu_T1, cpu_A0, cpu_T0,
+            tcg_gen_atomic_xchg_tl(tcg_ctx, cpu_T1, s->A0, cpu_T0,
                                    s->mem_index, ot | MO_LE);
             gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T1);
         }
@@ -6349,10 +6314,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
        if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+        gen_op_ld_v(s, ot, cpu_T1, s->A0);
         gen_add_A0_im(s, 1 << ot);
         /* load the segment first to handle exceptions properly */
-        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+        gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
         gen_movl_seg_T0(s, op);
         /* then put the data */
         gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_T1);
@@ -6473,23 +6438,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 switch(op >> 4) {
                 case 0:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     gen_helper_flds_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
                 case 1:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
                 case 2:
-                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0,
                                         s->mem_index, MO_LEQ);
                    gen_helper_fldl_FT0(tcg_ctx, cpu_env, cpu_tmp1_i64);
                     break;
                 case 3:
                 default:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LESW);
                     gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
@@ -6512,23 +6477,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             case 0:
                 switch(op >> 4) {
                 case 0:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     gen_helper_flds_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
                 case 1:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
                 case 2:
-                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                    tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0,
                                         s->mem_index, MO_LEQ);
                     gen_helper_fldl_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64);
                     break;
                 case 3:
                 default:
-                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LESW);
                     gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32);
                     break;
@@ -6539,18 +6504,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 switch(op >> 4) {
                 case 1:
                     gen_helper_fisttl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env);
-                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     break;
                 case 2:
                     gen_helper_fisttll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env);
-                    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0,
                                         s->mem_index, MO_LEQ);
                     break;
                 case 3:
                 default:
                     gen_helper_fistt_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env);
-                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUW);
                     break;
                 }
@@ -6560,23 +6525,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 switch(op >> 4) {
                 case 0:
                     gen_helper_fsts_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env);
-                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     break;
                 case 1:
                     gen_helper_fistl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env);
-                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUL);
                     break;
                 case 2:
                     gen_helper_fstl_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env);
-                    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0,
+                    tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0,
                                         s->mem_index, MO_LEQ);
                     break;
                 case 3:
                 default:
                     gen_helper_fist_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env);
-                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+                    tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                         s->mem_index, MO_LEUW);
                     break;
                 }
@@ -6587,65 +6552,65 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         }
         else if(op == 0x0c) /* fldenv mem */
         {
-            gen_helper_fldenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1));
+            gen_helper_fldenv(tcg_ctx, cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1));
        }
         else if(op == 0x0d) /* fldcw mem */
         {
-            tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+            tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0,
                                 s->mem_index, MO_LEUW);
             gen_helper_fldcw(tcg_ctx, cpu_env, cpu_tmp2_i32);
         }
         else if(op == 0x0e) /* fnstenv mem */
         {
-            gen_helper_fstenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1));
+            gen_helper_fstenv(tcg_ctx, cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1));
         }
         else if(op == 0x0f) /* fnstcw mem */
         {
             gen_helper_fnstcw(tcg_ctx, cpu_tmp2_i32, cpu_env);
-            tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+            tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                 s->mem_index, MO_LEUW);
         }
         else if(op == 0x1d) /* fldt mem */
         {
-            gen_helper_fldt_ST0(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_fldt_ST0(tcg_ctx, cpu_env, s->A0);
         }
         else if(op == 0x1f) /* fstpt mem */
         {
-            gen_helper_fstt_ST0(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_fstt_ST0(tcg_ctx, cpu_env, s->A0);
             gen_helper_fpop(tcg_ctx, cpu_env);
         }
         else if(op == 0x2c) /* frstor mem */
         {
-            gen_helper_frstor(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1));
+            gen_helper_frstor(tcg_ctx, cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1));
        }
         else if(op == 0x2e) /* fnsave mem */
         {
-            gen_helper_fsave(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1));
+            gen_helper_fsave(tcg_ctx, cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1));
         }
         else if(op == 0x2f) /* fnstsw mem */
         {
             gen_helper_fnstsw(tcg_ctx, cpu_tmp2_i32, cpu_env);
-            tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0,
+            tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, s->A0,
                                 s->mem_index, MO_LEUW);
         }
         else if(op == 0x3c) /* fbld */
        {
-            gen_helper_fbld_ST0(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_fbld_ST0(tcg_ctx, cpu_env, s->A0);
         }
         else if(op == 0x3e) /* fbstp */
         {
-            gen_helper_fbst_ST0(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_fbst_ST0(tcg_ctx, cpu_env, s->A0);
             gen_helper_fpop(tcg_ctx, cpu_env);
         }
         else if(op == 0x3d) /* fildll */
         {
-            tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+            tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ);
            gen_helper_fildll_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64);
         }
         else if(op == 0x3f) /* fistpll */
         {
             gen_helper_fistll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env);
-            tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+            tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ);
             gen_helper_fpop(tcg_ctx, cpu_env);
         }
         else
@@ -7126,13 +7091,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         } else {
             gen_stack_A0(s);
             /* pop offset */
-            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, dflag, cpu_T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
                exception */
             gen_op_jmp_v(tcg_ctx, cpu_T0);
             /* pop selector */
             gen_add_A0_im(s, 1 << dflag);
-            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, dflag, cpu_T0, s->A0);
             gen_op_movl_seg_T0_vm(tcg_ctx, R_CS);
             /* add stack offset */
             gen_stack_update(s, val + (2 << dflag));
@@ -7395,7 +7360,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             s->rip_offset = 1;
             gen_lea_modrm(env, s, modrm);
             if (!(s->prefix & PREFIX_LOCK)) {
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
            }
         } else {
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, rm);
@@ -7431,10 +7396,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_exts(tcg_ctx, ot, cpu_T1);
             tcg_gen_sari_tl(tcg_ctx, cpu_tmp0, cpu_T1, 3 + ot);
             tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, ot);
-            tcg_gen_add_tl(tcg_ctx, cpu_A0, gen_lea_modrm_1(s, a), cpu_tmp0);
-            gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+            tcg_gen_add_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a), cpu_tmp0);
+            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (!(s->prefix & PREFIX_LOCK)) {
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
             }
         } else {
             gen_op_mov_v_reg(tcg_ctx, ot, cpu_T0, rm);
@@ -7448,20 +7413,20 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
            case 0: /* bt */
                 /* Needs no atomic ops; we surpressed the normal
                    memory load for LOCK above so do it now. */
-                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, ot, cpu_T0, s->A0);
                 break;
             case 1: /* bts */
-                tcg_gen_atomic_fetch_or_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_or_tl(tcg_ctx, cpu_T0, s->A0, cpu_tmp0,
                                            s->mem_index, ot | MO_LE);
                 break;
            case 2: /* btr */
                 tcg_gen_not_tl(tcg_ctx, cpu_tmp0, cpu_tmp0);
-                tcg_gen_atomic_fetch_and_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_and_tl(tcg_ctx, cpu_T0, s->A0, cpu_tmp0,
                                             s->mem_index, ot | MO_LE);
                 break;
             default:
            case 3: /* btc */
-                tcg_gen_atomic_fetch_xor_tl(tcg_ctx, cpu_T0, cpu_A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_xor_tl(tcg_ctx, cpu_T0, s->A0, cpu_tmp0,
                                             s->mem_index, ot | MO_LE);
                 break;
             }
@@ -7485,7 +7450,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         }
         if (op != 0) {
             if (mod != 3) {
-                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+                gen_op_st_v(s, ot, cpu_T0, s->A0);
             } else {
                 gen_op_mov_reg_v(tcg_ctx, ot, rm, cpu_T0);
             }
@@ -7716,9 +7681,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         gen_lea_modrm(env, s, modrm);
         tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, cpu_T0);
         if (ot == MO_16) {
-            gen_helper_boundw(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32);
+            gen_helper_boundw(tcg_ctx, cpu_env, s->A0, cpu_tmp2_i32);
         } else {
-            gen_helper_boundl(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32);
+            gen_helper_boundl(tcg_ctx, cpu_env, s->A0, cpu_tmp2_i32);
         }
         break;
     case 0x1c8: case 0x1c9: case 0x1ca: case 0x1cb:
@@ -7948,13 +7913,13 @@ case 0x101:
             gen_lea_modrm(env, s, modrm);
             tcg_gen_ld32u_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State, gdt.limit));
-            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_op_st_v(s, MO_16, cpu_T0, s->A0);
             gen_add_A0_im(s, 2);
             tcg_gen_ld_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
             if (dflag == MO_16) {
                 tcg_gen_andi_tl(tcg_ctx, cpu_T0, cpu_T0, 0xffffff);
            }
-            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
             break;
 
         case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
@@ -7963,10 +7928,10 @@ case 0x101:
                 goto illegal_op;
             }
             gen_update_cc_op(s);
             gen_jmp_im(s, pc_start - s->cs_base);
-            tcg_gen_mov_tl(tcg_ctx, cpu_A0, cpu_regs[R_EAX]);
-            gen_extu(tcg_ctx, s->aflag, cpu_A0);
+            tcg_gen_mov_tl(tcg_ctx, s->A0, cpu_regs[R_EAX]);
+            gen_extu(tcg_ctx, s->aflag, s->A0);
             gen_add_A0_ds_seg(s);
-            gen_helper_monitor(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_monitor(tcg_ctx, cpu_env, s->A0);
             break;
         case 0xc9: /* mwait */
@@ -8004,13 +7969,13 @@ case 0x101:
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
             gen_lea_modrm(env, s, modrm);
             tcg_gen_ld32u_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
-            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+            gen_op_st_v(s, MO_16, cpu_T0, s->A0);
             gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
             if (dflag == MO_16) {
                 tcg_gen_andi_tl(tcg_ctx, cpu_T0, cpu_T0, 0xffffff);
             }
-            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
             break;
 
         case 0xd0: /* xgetbv */
@@ -8158,9 +8123,9 @@ case 0x101:
             }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
             gen_lea_modrm(env, s, modrm);
-            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+            gen_op_ld_v(s, MO_16, cpu_T1, s->A0);
             gen_add_A0_im(s, 2);
-            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
             if (dflag == MO_16) {
                 tcg_gen_andi_tl(tcg_ctx, cpu_T0, cpu_T0, 0xffffff);
             }
@@ -8175,9 +8140,9 @@ case 0x101:
             }
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
             gen_lea_modrm(env, s, modrm);
-            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+            gen_op_ld_v(s, MO_16, cpu_T1, s->A0);
            gen_add_A0_im(s, 2);
-            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
             if (dflag == MO_16) {
                 tcg_gen_andi_tl(tcg_ctx, cpu_T0, cpu_T0, 0xffffff);
             }
@@ -8233,7 +8198,7 @@ case 0x101:
             gen_update_cc_op(s);
             gen_jmp_im(s, pc_start - s->cs_base);
             gen_lea_modrm(env, s, modrm);
-            gen_helper_invlpg(tcg_ctx, cpu_env, cpu_A0);
+            gen_helper_invlpg(tcg_ctx, cpu_env, s->A0);
             gen_jmp_im(s, s->pc - s->cs_base);
             gen_eob(s);
             break;
@@ -8308,7 +8273,7 @@ case 0x101:
                 gen_op_mov_reg_v(tcg_ctx, d_ot, reg, cpu_T0);
             } else {
                 gen_lea_modrm(env, s, modrm);
-                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
+                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, s->A0);
                gen_op_mov_reg_v(tcg_ctx, d_ot, reg, cpu_T0);
             }
         } else
@@ -8329,9 +8294,9 @@ case 0x101:
         rm = modrm & 7;
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_op_ld_v(s, ot, t0, cpu_A0);
+            gen_op_ld_v(s, ot, t0, s->A0);
             a0 = tcg_temp_local_new(tcg_ctx);
-            tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0);
+            tcg_gen_mov_tl(tcg_ctx, a0, s->A0);
         } else {
             gen_op_mov_v_reg(tcg_ctx, ot, t0, rm);
             a0 = NULL;
@@ -8447,16 +8412,16 @@ case 0x101:
             } else {
                 gen_lea_modrm(env, s, modrm);
                 if (CODE64(s)) {
-                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndl[reg], cpu_A0,
+                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndl[reg], s->A0,
                                         s->mem_index, MO_LEQ);
-                    tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_A0, 8);
-                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndu[reg], cpu_A0,
+                    tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8);
+                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndu[reg], s->A0,
                                         s->mem_index, MO_LEQ);
                 } else {
-                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndl[reg], cpu_A0,
+                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndl[reg], s->A0,
                                         s->mem_index, MO_LEUL);
-                    tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_A0, 4);
-                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndu[reg], cpu_A0,
+                    tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4);
+                    tcg_gen_qemu_ld_i64(s->uc, tcg_ctx->cpu_bndu[reg], s->A0,
                                         s->mem_index, MO_LEUL);
                 }
                 /* bnd registers are now in-use */
@@ -8472,22 +8437,22 @@ case 0x101:
                 goto illegal_op;
             }
             if (a.base >= 0) {
-                tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_regs[a.base], a.disp);
+                tcg_gen_addi_tl(tcg_ctx, s->A0, cpu_regs[a.base], a.disp);
             } else {
-                tcg_gen_movi_tl(tcg_ctx, cpu_A0, 0);
+                tcg_gen_movi_tl(tcg_ctx, s->A0, 0);
             }
-            gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (a.index >= 0) {
                 tcg_gen_mov_tl(tcg_ctx, cpu_T0, cpu_regs[a.index]);
             } else {
                 tcg_gen_movi_tl(tcg_ctx, cpu_T0, 0);
            }
             if (CODE64(s)) {
-                gen_helper_bndldx64(tcg_ctx, tcg_ctx->cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
+                gen_helper_bndldx64(tcg_ctx, tcg_ctx->cpu_bndl[reg], cpu_env, s->A0, cpu_T0);
                 tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], cpu_env,
                                offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
             } else {
-                gen_helper_bndldx32(tcg_ctx, tcg_ctx->cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
+                gen_helper_bndldx32(tcg_ctx, tcg_ctx->cpu_bndu[reg], cpu_env, s->A0, cpu_T0);
                 tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]);
                 tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_bndu[reg], 32);
             }
@@ -8521,11 +8486,11 @@ case 0x101:
                 /* rip-relative generates #ud */
                 goto illegal_op;
            }
-            tcg_gen_not_tl(tcg_ctx, cpu_A0, gen_lea_modrm_1(s, a));
+            tcg_gen_not_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a));
             if (!CODE64(s)) {
-                tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0);
+                tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0);
             }
-            tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], cpu_A0);
+            tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0);
             /* bnd registers are now in-use */
             gen_set_hflag(s, HF_MPX_IU_MASK);
             break;
@@ -8554,16 +8519,16 @@ case 0x101:
             } else {
                 gen_lea_modrm(env, s, modrm);
                 if (CODE64(s)) {
-                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndl[reg], cpu_A0,
+                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndl[reg], s->A0,
                                         s->mem_index, MO_LEQ);
-                    tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_A0, 8);
-                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndu[reg], cpu_A0,
+                    tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8);
+                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndu[reg], s->A0,
                                         s->mem_index, MO_LEQ);
                 } else {
-                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndl[reg], cpu_A0,
+                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndl[reg], s->A0,
                                         s->mem_index, MO_LEUL);
-                    tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_A0, 4);
-                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndu[reg], cpu_A0,
+                    tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4);
+                    tcg_gen_qemu_st_i64(s->uc, tcg_ctx->cpu_bndu[reg], s->A0,
                                         s->mem_index, MO_LEUL);
                 }
             }
@@ -8577,21 +8542,21 @@ case 0x101:
                 goto illegal_op;
            }
             if (a.base >= 0) {
-                tcg_gen_addi_tl(tcg_ctx, cpu_A0, cpu_regs[a.base], a.disp);
+                tcg_gen_addi_tl(tcg_ctx, s->A0, cpu_regs[a.base], a.disp);
             } else {
-                tcg_gen_movi_tl(tcg_ctx, cpu_A0, 0);
+                tcg_gen_movi_tl(tcg_ctx, s->A0, 0);
             }
-            gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (a.index >= 0) {
                 tcg_gen_mov_tl(tcg_ctx, cpu_T0, cpu_regs[a.index]);
             } else {
                 tcg_gen_movi_tl(tcg_ctx, cpu_T0, 0);
             }
             if (CODE64(s)) {
-                gen_helper_bndstx64(tcg_ctx, cpu_env, cpu_A0, cpu_T0,
+                gen_helper_bndstx64(tcg_ctx, cpu_env, s->A0, cpu_T0,
                                     tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]);
             } else {
-                gen_helper_bndstx32(tcg_ctx, cpu_env, cpu_A0, cpu_T0,
+                gen_helper_bndstx32(tcg_ctx, cpu_env, s->A0, cpu_T0,
                                     tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]);
             }
@@ -8743,7 +8708,7 @@ case 0x101:
             break;
         }
         gen_lea_modrm(env, s, modrm);
-        gen_helper_fxsave(tcg_ctx, cpu_env, cpu_A0);
+        gen_helper_fxsave(tcg_ctx, cpu_env, s->A0);
         break;
 
     CASE_MODRM_MEM_OP(1): /* fxrstor */
@@ -8756,7 +8721,7 @@ case 0x101:
             break;
        }
         gen_lea_modrm(env, s, modrm);
-        gen_helper_fxrstor(tcg_ctx, cpu_env, cpu_A0);
+        gen_helper_fxrstor(tcg_ctx, cpu_env, s->A0);
         break;
 
    CASE_MODRM_MEM_OP(2): /* ldmxcsr */
@@ -8768,7 +8733,7 @@ case 0x101:
             break;
        }
         gen_lea_modrm(env, s, modrm);
-        tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+        tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL);
         gen_helper_ldmxcsr(tcg_ctx, cpu_env, cpu_tmp2_i32);
         break;
 
@@ -8782,7 +8747,7 @@ case 0x101:
        }
         gen_lea_modrm(env, s, modrm);
         tcg_gen_ld32u_tl(tcg_ctx, cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
-        gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+        gen_op_st_v(s, MO_32, cpu_T0, s->A0);
         break;
 
    CASE_MODRM_MEM_OP(4): /* xsave */
@@ -8794,7 +8759,7 @@ case 0x101:
         gen_lea_modrm(env, s, modrm);
         tcg_gen_concat_tl_i64(tcg_ctx, cpu_tmp1_i64, cpu_regs[R_EAX],
                               cpu_regs[R_EDX]);
-        gen_helper_xsave(tcg_ctx, cpu_env, cpu_A0, cpu_tmp1_i64);
+        gen_helper_xsave(tcg_ctx, cpu_env, s->A0, cpu_tmp1_i64);
         break;
 
    CASE_MODRM_MEM_OP(5): /* xrstor */
@@ -8806,7 +8771,7 @@ case 0x101:
         gen_lea_modrm(env, s, modrm);
         tcg_gen_concat_tl_i64(tcg_ctx, cpu_tmp1_i64, cpu_regs[R_EAX],
                               cpu_regs[R_EDX]);
-        gen_helper_xrstor(tcg_ctx, cpu_env, cpu_A0, cpu_tmp1_i64);
+        gen_helper_xrstor(tcg_ctx, cpu_env, s->A0, cpu_tmp1_i64);
         /* XRSTOR is how MPX is enabled, which changes how
           we translate.  Thus we need to end the TB.  */
         gen_update_cc_op(s);
@@ -8834,7 +8799,7 @@ case 0x101:
             gen_lea_modrm(env, s, modrm);
             tcg_gen_concat_tl_i64(tcg_ctx, cpu_tmp1_i64, cpu_regs[R_EAX],
                                   cpu_regs[R_EDX]);
-            gen_helper_xsaveopt(tcg_ctx, cpu_env, cpu_A0, cpu_tmp1_i64);
+            gen_helper_xsaveopt(tcg_ctx, cpu_env, s->A0, cpu_tmp1_i64);
         }
         break;
 
@@ -9217,7 +9182,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
 
     tcg_ctx->cpu_T0 = tcg_temp_new(tcg_ctx);
     tcg_ctx->cpu_T1 = tcg_temp_new(tcg_ctx);
-    tcg_ctx->cpu_A0 = tcg_temp_new(tcg_ctx);
+    dc->A0 = tcg_temp_new(tcg_ctx);
 
     tcg_ctx->cpu_tmp0 = tcg_temp_new(tcg_ctx);
     tcg_ctx->cpu_tmp4 = tcg_temp_new(tcg_ctx);
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index 9ab1b5d9..690493b3 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -822,7 +822,6 @@ struct TCGContext {
     int x86_64_hregs;  // qemu/target-i386/translate.c
 
     /* qemu/target-i386/translate.c: global TCGv vars */
-    TCGv cpu_A0;
     TCGv cpu_cc_dst;
     TCGv cpu_cc_src;
     TCGv cpu_cc_src2;
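
Reviewer note, not part of the applicable patch: every hunk above performs the same mechanical move -- delete the cpu_A0 global from the shared TCGContext (and each function's local "TCGv cpu_A0 = tcg_ctx->cpu_A0;" alias), add an A0 member to DisasContext, allocate it in i386_tr_init_disas_context(), and reference s->A0 at every call site. The toy C program below is a minimal compilable sketch of that ownership change; TCGv, TCGContext, DisasContext and tcg_temp_new() here are simplified stand-ins, not the real QEMU/Unicorn definitions.

/* Toy model of the refactor: a per-translation temp moves out of the
 * shared codegen context and into the per-translation state. */
#include <stdio.h>

typedef int TCGv;                  /* stand-in: real TCGv is an opaque handle */

typedef struct TCGContext {
    TCGv next_temp;                /* toy allocator state */
    /* TCGv cpu_A0;  -- the global field the patch removes */
} TCGContext;

typedef struct DisasContext {
    TCGContext *tcg_ctx;
    TCGv A0;                       /* per-translation temp, as in the patch */
} DisasContext;

static TCGv tcg_temp_new(TCGContext *tcg_ctx)
{
    return ++tcg_ctx->next_temp;   /* hand out a fresh temp handle */
}

/* Mirrors the patched i386_tr_init_disas_context():
 *     dc->A0 = tcg_temp_new(tcg_ctx);                                   */
static void init_disas_context(DisasContext *dc, TCGContext *tcg_ctx)
{
    dc->tcg_ctx = tcg_ctx;
    dc->A0 = tcg_temp_new(tcg_ctx);
}

int main(void)
{
    TCGContext tcg = { 0 };
    DisasContext dc;

    init_disas_context(&dc, &tcg);
    /* Call sites now say s->A0 instead of tcg_ctx->cpu_A0. */
    printf("A0 handle = %d\n", dc.A0);
    return 0;
}

The practical effect, presumably the same motivation as the matching upstream QEMU cleanup, is that translation-local scratch state no longer leaks through the globally shared context, so each DisasContext is self-contained.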