diff --git a/qemu/tcg/optimize.c b/qemu/tcg/optimize.c
index 0ab38e40..36e06930 100644
--- a/qemu/tcg/optimize.c
+++ b/qemu/tcg/optimize.c
@@ -37,9 +37,9 @@ static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
     return ts->state_ptr;
 }
 
-static inline struct tcg_temp_info *arg_info(TCGContext *s, TCGArg arg)
+static inline struct tcg_temp_info *arg_info(TCGArg arg)
 {
-    return ts_info(arg_temp(s, arg));
+    return ts_info(arg_temp(arg));
 }
 
 static inline bool ts_is_const(TCGTemp *ts)
@@ -47,9 +47,9 @@ static inline bool ts_is_const(TCGTemp *ts)
     return ts_info(ts)->is_const;
 }
 
-static inline bool arg_is_const(TCGContext *s, TCGArg arg)
+static inline bool arg_is_const(TCGArg arg)
 {
-    return ts_is_const(arg_temp(s, arg));
+    return ts_is_const(arg_temp(arg));
 }
 
 static inline bool ts_is_copy(TCGTemp *ts)
@@ -74,7 +74,7 @@ static void reset_ts(TCGTemp *ts)
 
 static void reset_temp(TCGContext *s, TCGArg arg)
 {
-    reset_ts(arg_temp(s, arg));
+    reset_ts(arg_temp(arg));
 }
 
 /* Reset all temporaries, given that there are NB_TEMPS of them.  */
@@ -105,7 +105,7 @@ static void init_ts_info(TCGContext *s, TCGTemp *ts)
 
 static void init_arg_info(TCGContext *s, TCGArg arg)
 {
-    init_ts_info(s, arg_temp(s, arg));
+    init_ts_info(s, arg_temp(arg));
 }
 
 static int op_bits(TCGContext *s, TCGOpcode op)
@@ -192,16 +192,16 @@ static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
     return false;
 }
 
-static bool args_are_copies(TCGContext *s, TCGArg arg1, TCGArg arg2)
+static bool args_are_copies(TCGArg arg1, TCGArg arg2)
 {
-    return ts_are_copies(arg_temp(s, arg1), arg_temp(s, arg2));
+    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }
 
 static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
 {
     TCGOpcode new_op = op_to_movi(s, op->opc);
     tcg_target_ulong mask;
-    struct tcg_temp_info *di = arg_info(s, dst);
+    struct tcg_temp_info *di = arg_info(dst);
 
     op->opc = new_op;
 
@@ -222,8 +222,8 @@ static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
 
 static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
 {
-    TCGTemp *dst_ts = arg_temp(s, dst);
-    TCGTemp *src_ts = arg_temp(s, src);
+    TCGTemp *dst_ts = arg_temp(dst);
+    TCGTemp *src_ts = arg_temp(src);
     struct tcg_temp_info *di;
     struct tcg_temp_info *si;
     tcg_target_ulong mask;
@@ -506,10 +506,10 @@ static bool do_constant_folding_cond_eq(TCGCond c)
 static TCGArg do_constant_folding_cond(TCGContext *s, TCGOpcode op, TCGArg x,
                                        TCGArg y, TCGCond c)
 {
-    tcg_target_ulong xv = arg_info(s, x)->val;
-    tcg_target_ulong yv = arg_info(s, y)->val;
+    tcg_target_ulong xv = arg_info(x)->val;
+    tcg_target_ulong yv = arg_info(y)->val;
 
-    if (arg_is_const(s, x) && arg_is_const(s, y)) {
+    if (arg_is_const(x) && arg_is_const(y)) {
         switch (op_bits(s, op)) {
         case 32:
             return do_constant_folding_cond_32(xv, yv, c);
@@ -518,9 +518,9 @@
         default:
             tcg_abort();
         }
-    } else if (args_are_copies(s, x, y)) {
+    } else if (args_are_copies(x, y)) {
         return do_constant_folding_cond_eq(c);
-    } else if (arg_is_const(s, y) && yv == 0) {
+    } else if (arg_is_const(y) && yv == 0) {
         switch (c) {
         case TCG_COND_LTU:
             return 0;
@@ -540,14 +540,14 @@ static TCGArg do_constant_folding_cond2(TCGContext *s, TCGArg *p1, TCGArg *p2, T
     TCGArg al = p1[0], ah = p1[1];
     TCGArg bl = p2[0], bh = p2[1];
 
-    if (arg_is_const(s, bl) && arg_is_const(s, bh)) {
-        tcg_target_ulong blv = arg_info(s, bl)->val;
-        tcg_target_ulong bhv = arg_info(s, bh)->val;
+    if (arg_is_const(bl) && arg_is_const(bh)) {
+        tcg_target_ulong blv = arg_info(bl)->val;
+        tcg_target_ulong bhv = arg_info(bh)->val;
         uint64_t b = deposit64(blv, 32, 32, bhv);
 
-        if (arg_is_const(s, al) && arg_is_const(s, ah)) {
-            tcg_target_ulong alv = arg_info(s, al)->val;
-            tcg_target_ulong ahv = arg_info(s, ah)->val;
+        if (arg_is_const(al) && arg_is_const(ah)) {
+            tcg_target_ulong alv = arg_info(al)->val;
+            tcg_target_ulong ahv = arg_info(ah)->val;
             uint64_t a = deposit64(alv, 32, 32, ahv);
             return do_constant_folding_cond_64(a, b, c);
         }
@@ -562,7 +562,7 @@
             }
         }
     }
-    if (args_are_copies(s, al, bl) && args_are_copies(s, ah, bh)) {
+    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
         return do_constant_folding_cond_eq(c);
     }
     return 2;
 }
@@ -572,8 +572,8 @@ static bool swap_commutative(TCGContext *s, TCGArg dest, TCGArg *p1, TCGArg *p2)
 {
     TCGArg a1 = *p1, a2 = *p2;
     int sum = 0;
-    sum += arg_is_const(s, a1);
-    sum -= arg_is_const(s, a2);
+    sum += arg_is_const(a1);
+    sum -= arg_is_const(a2);
 
     /* Prefer the constant in second argument, and then the form
        op a, a, b, which is better handled on non-RISC hosts. */
@@ -588,10 +588,10 @@ static bool swap_commutative2(TCGContext *s, TCGArg *p1, TCGArg *p2)
 {
     int sum = 0;
-    sum += arg_is_const(s, p1[0]);
-    sum += arg_is_const(s, p1[1]);
-    sum -= arg_is_const(s, p2[0]);
-    sum -= arg_is_const(s, p2[1]);
+    sum += arg_is_const(p1[0]);
+    sum += arg_is_const(p1[1]);
+    sum -= arg_is_const(p2[0]);
+    sum -= arg_is_const(p2[1]);
     if (sum > 0) {
         TCGArg t;
         t = p1[0], p1[0] = p2[0], p2[0] = t;
@@ -633,7 +633,7 @@ void tcg_optimize(TCGContext *s)
             nb_oargs = op->callo;
             nb_iargs = op->calli;
             for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                TCGTemp *ts = arg_temp(s, op->args[i]);
+                TCGTemp *ts = arg_temp(op->args[i]);
                 if (ts) {
                     init_ts_info(s, ts);
                 }
             }
@@ -648,9 +648,9 @@
 
         /* Do copy propagation */
         for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-            TCGTemp *ts = arg_temp(s, op->args[i]);
+            TCGTemp *ts = arg_temp(op->args[i]);
             if (ts && ts_is_copy(ts)) {
-                op->args[i] = temp_arg(s, find_better_copy(ts));
+                op->args[i] = temp_arg(find_better_copy(ts));
             }
         }
@@ -719,8 +719,8 @@
         CASE_OP_32_64(sar):
         CASE_OP_32_64(rotl):
         CASE_OP_32_64(rotr):
-            if (arg_is_const(s, op->args[1])
-                && arg_info(s, op->args[1])->val == 0) {
+            if (arg_is_const(op->args[1])
+                && arg_info(op->args[1])->val == 0) {
                 tcg_opt_gen_movi(s, op, op->args[0], 0);
                 continue;
             }
@@ -730,7 +730,7 @@
                 TCGOpcode neg_op;
                 bool have_neg;
 
-                if (arg_is_const(s, op->args[2])) {
+                if (arg_is_const(op->args[2])) {
                     /* Proceed with possible constant folding. */
                     break;
                 }
@@ -744,8 +744,8 @@
                 if (!have_neg) {
                     break;
                 }
-                if (arg_is_const(s, op->args[1])
-                    && arg_info(s, op->args[1])->val == 0) {
+                if (arg_is_const(op->args[1])
+                    && arg_info(op->args[1])->val == 0) {
                     op->opc = neg_op;
                     reset_temp(s, op->args[0]);
                     op->args[1] = op->args[2];
@@ -755,34 +755,34 @@
             break;
         CASE_OP_32_64(xor):
         CASE_OP_32_64(nand):
-            if (!arg_is_const(s, op->args[1])
-                && arg_is_const(s, op->args[2])
-                && arg_info(s, op->args[2])->val == -1) {
+            if (!arg_is_const(op->args[1])
+                && arg_is_const(op->args[2])
+                && arg_info(op->args[2])->val == -1) {
                 i = 1;
                 goto try_not;
             }
             break;
         CASE_OP_32_64(nor):
-            if (!arg_is_const(s, op->args[1])
-                && arg_is_const(s, op->args[2])
-                && arg_info(s, op->args[2])->val == 0) {
+            if (!arg_is_const(op->args[1])
+                && arg_is_const(op->args[2])
+                && arg_info(op->args[2])->val == 0) {
                 i = 1;
                 goto try_not;
             }
             break;
        CASE_OP_32_64(andc):
-            if (!arg_is_const(s, op->args[2])
-                && arg_is_const(s, op->args[1])
-                && arg_info(s, op->args[1])->val == -1) {
+            if (!arg_is_const(op->args[2])
+                && arg_is_const(op->args[1])
+                && arg_info(op->args[1])->val == -1) {
                 i = 2;
                 goto try_not;
             }
             break;
         CASE_OP_32_64(orc):
         CASE_OP_32_64(eqv):
-            if (!arg_is_const(s, op->args[2])
-                && arg_is_const(s, op->args[1])
-                && arg_info(s, op->args[1])->val == 0) {
+            if (!arg_is_const(op->args[2])
+                && arg_is_const(op->args[1])
+                && arg_info(op->args[1])->val == 0) {
                 i = 2;
                 goto try_not;
             }
@@ -823,9 +823,9 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(or):
         CASE_OP_32_64(xor):
         CASE_OP_32_64(andc):
-            if (!arg_is_const(s, op->args[1])
-                && arg_is_const(s, op->args[2])
-                && arg_info(s, op->args[2])->val == 0) {
+            if (!arg_is_const(op->args[1])
+                && arg_is_const(op->args[2])
+                && arg_info(op->args[2])->val == 0) {
                 tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                 continue;
             }
             break;
         CASE_OP_32_64(and):
         CASE_OP_32_64(orc):
         CASE_OP_32_64(eqv):
-            if (!arg_is_const(s, op->args[1])
-                && arg_is_const(s, op->args[2])
-                && arg_info(s, op->args[2])->val == -1) {
+            if (!arg_is_const(op->args[1])
+                && arg_is_const(op->args[2])
+                && arg_info(op->args[2])->val == -1) {
                 tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                 continue;
             }
@@ -850,21 +850,21 @@
         affected = -1;
         switch (opc) {
         CASE_OP_32_64(ext8s):
-            if ((arg_info(s, op->args[1])->mask & 0x80) != 0) {
+            if ((arg_info(op->args[1])->mask & 0x80) != 0) {
                 break;
             }
         CASE_OP_32_64(ext8u):
             mask = 0xff;
             goto and_const;
         CASE_OP_32_64(ext16s):
-            if ((arg_info(s, op->args[1])->mask & 0x8000) != 0) {
+            if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
                 break;
             }
         CASE_OP_32_64(ext16u):
             mask = 0xffff;
             goto and_const;
         case INDEX_op_ext32s_i64:
-            if ((arg_info(s, op->args[1])->mask & 0x80000000) != 0) {
+            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                 break;
             }
         case INDEX_op_ext32u_i64:
@@ -872,114 +872,114 @@
             goto and_const;
 
         CASE_OP_32_64(and):
-            mask = arg_info(s, op->args[2])->mask;
-            if (arg_is_const(s, op->args[2])) {
+            mask = arg_info(op->args[2])->mask;
+            if (arg_is_const(op->args[2])) {
        and_const:
-                affected = arg_info(s, op->args[1])->mask & ~mask;
+                affected = arg_info(op->args[1])->mask & ~mask;
             }
-            mask = arg_info(s, op->args[1])->mask & mask;
+            mask = arg_info(op->args[1])->mask & mask;
             break;
 
         case INDEX_op_ext_i32_i64:
-            if ((arg_info(s, op->args[1])->mask & 0x80000000) != 0) {
+            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                 break;
             }
         case INDEX_op_extu_i32_i64:
             /* We do not compute affected as it is a size changing op.  */
-            mask = (uint32_t)arg_info(s, op->args[1])->mask;
+            mask = (uint32_t)arg_info(op->args[1])->mask;
             break;
 
         CASE_OP_32_64(andc):
             /* Known-zeros does not imply known-ones.  Therefore unless
                args[2] is constant, we can't infer anything from it.  */
-            if (arg_is_const(s, op->args[2])) {
-                mask = ~arg_info(s, op->args[2])->mask;
+            if (arg_is_const(op->args[2])) {
+                mask = ~arg_info(op->args[2])->mask;
                 goto and_const;
             }
             /* But we certainly know nothing outside args[1] may be set. */
-            mask = arg_info(s, op->args[1])->mask;
+            mask = arg_info(op->args[1])->mask;
             break;
 
         case INDEX_op_sar_i32:
-            if (arg_is_const(s, op->args[2])) {
-                tmp = arg_info(s, op->args[2])->val & 31;
-                mask = (int32_t)arg_info(s, op->args[1])->mask >> tmp;
+            if (arg_is_const(op->args[2])) {
+                tmp = arg_info(op->args[2])->val & 31;
+                mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
             }
             break;
         case INDEX_op_sar_i64:
-            if (arg_is_const(s, op->args[2])) {
-                tmp = arg_info(s, op->args[2])->val & 63;
-                mask = (int64_t)arg_info(s, op->args[1])->mask >> tmp;
+            if (arg_is_const(op->args[2])) {
+                tmp = arg_info(op->args[2])->val & 63;
+                mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
             }
             break;
 
         case INDEX_op_shr_i32:
-            if (arg_is_const(s, op->args[2])) {
-                tmp = arg_info(s, op->args[2])->val & 31;
-                mask = (uint32_t)arg_info(s, op->args[1])->mask >> tmp;
+            if (arg_is_const(op->args[2])) {
+                tmp = arg_info(op->args[2])->val & 31;
+                mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
             }
             break;
         case INDEX_op_shr_i64:
-            if (arg_is_const(s, op->args[2])) {
-                tmp = arg_info(s, op->args[2])->val & 63;
-                mask = (uint64_t)arg_info(s, op->args[1])->mask >> tmp;
+            if (arg_is_const(op->args[2])) {
+                tmp = arg_info(op->args[2])->val & 63;
+                mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
             }
             break;
 
         case INDEX_op_extrl_i64_i32:
-            mask = (uint32_t)arg_info(s, op->args[1])->mask;
+            mask = (uint32_t)arg_info(op->args[1])->mask;
             break;
         case INDEX_op_extrh_i64_i32:
-            mask = (uint64_t)arg_info(s, op->args[1])->mask >> 32;
+            mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
             break;
 
         CASE_OP_32_64(shl):
-            if (arg_is_const(s, op->args[2])) {
-                tmp = arg_info(s, op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
-                mask = arg_info(s, op->args[1])->mask << tmp;
+            if (arg_is_const(op->args[2])) {
+                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
+                mask = arg_info(op->args[1])->mask << tmp;
             }
             break;
 
         CASE_OP_32_64(neg):
             /* Set to 1 all bits to the left of the rightmost.  */
-            mask = -(arg_info(s, op->args[1])->mask
-                     & -arg_info(s, op->args[1])->mask);
+            mask = -(arg_info(op->args[1])->mask
+                     & -arg_info(op->args[1])->mask);
             break;
 
         CASE_OP_32_64(deposit):
-            mask = deposit64(arg_info(s, op->args[1])->mask,
+            mask = deposit64(arg_info(op->args[1])->mask,
                              op->args[3], op->args[4],
-                             arg_info(s, op->args[2])->mask);
+                             arg_info(op->args[2])->mask);
             break;
 
         CASE_OP_32_64(extract):
-            mask = extract64(arg_info(s, op->args[1])->mask,
+            mask = extract64(arg_info(op->args[1])->mask,
                              op->args[2], op->args[3]);
             if (op->args[2] == 0) {
-                affected = arg_info(s, op->args[1])->mask & ~mask;
+                affected = arg_info(op->args[1])->mask & ~mask;
             }
             break;
         CASE_OP_32_64(sextract):
-            mask = sextract64(arg_info(s, op->args[1])->mask,
+            mask = sextract64(arg_info(op->args[1])->mask,
                               op->args[2], op->args[3]);
             if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
-                affected = arg_info(s, op->args[1])->mask & ~mask;
+                affected = arg_info(op->args[1])->mask & ~mask;
             }
             break;
 
         CASE_OP_32_64(or):
         CASE_OP_32_64(xor):
-            mask = arg_info(s, op->args[1])->mask | arg_info(s, op->args[2])->mask;
+            mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
             break;
 
         case INDEX_op_clz_i32:
         case INDEX_op_ctz_i32:
-            mask = arg_info(s, op->args[2])->mask | 31;
+            mask = arg_info(op->args[2])->mask | 31;
             break;
 
         case INDEX_op_clz_i64:
         case INDEX_op_ctz_i64:
-            mask = arg_info(s, op->args[2])->mask | 63;
+            mask = arg_info(op->args[2])->mask | 63;
             break;
 
         case INDEX_op_ctpop_i32:
@@ -995,7 +995,7 @@ void tcg_optimize(TCGContext *s)
             break;
 
         CASE_OP_32_64(movcond):
-            mask = arg_info(s, op->args[3])->mask | arg_info(s, op->args[4])->mask;
+            mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
             break;
 
         CASE_OP_32_64(ld8u):
@@ -1049,8 +1049,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mul):
         CASE_OP_32_64(muluh):
         CASE_OP_32_64(mulsh):
-            if (arg_is_const(s, op->args[2])
-                && arg_info(s, op->args[2])->val == 0) {
+            if (arg_is_const(op->args[2])
+                && arg_info(op->args[2])->val == 0) {
                 tcg_opt_gen_movi(s, op, op->args[0], 0);
                 continue;
             }
@@ -1063,7 +1063,7 @@
         switch (opc) {
         CASE_OP_32_64(or):
         CASE_OP_32_64(and):
-            if (args_are_copies(s, op->args[1], op->args[2])) {
+            if (args_are_copies(op->args[1], op->args[2])) {
                 tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                 continue;
             }
@@ -1077,7 +1077,7 @@
         CASE_OP_32_64(andc):
         CASE_OP_32_64(sub):
         CASE_OP_32_64(xor):
-            if (args_are_copies(s, op->args[1], op->args[2])) {
+            if (args_are_copies(op->args[1], op->args[2])) {
                 tcg_opt_gen_movi(s, op, op->args[0], 0);
                 continue;
             }
@@ -1110,8 +1110,8 @@
         case INDEX_op_extu_i32_i64:
         case INDEX_op_extrl_i64_i32:
         case INDEX_op_extrh_i64_i32:
-            if (arg_is_const(s, op->args[1])) {
-                tmp = do_constant_folding(s, opc, arg_info(s, op->args[1])->val, 0);
+            if (arg_is_const(op->args[1])) {
+                tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val, 0);
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
                 break;
             }
@@ -1139,9 +1139,9 @@
         CASE_OP_32_64(divu):
         CASE_OP_32_64(rem):
         CASE_OP_32_64(remu):
-            if (arg_is_const(s, op->args[1]) && arg_is_const(s, op->args[2])) {
-                tmp = do_constant_folding(s, opc, arg_info(s, op->args[1])->val,
-                                          arg_info(s, op->args[2])->val);
+            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+                tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val,
+                                          arg_info(op->args[2])->val);
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
                 break;
             }
@@ -1149,8 +1149,8 @@ void tcg_optimize(TCGContext *s)
 
         CASE_OP_32_64(clz):
         CASE_OP_32_64(ctz):
-            if (arg_is_const(s, op->args[1])) {
-                TCGArg v = arg_info(s, op->args[1])->val;
+            if (arg_is_const(op->args[1])) {
+                TCGArg v = arg_info(op->args[1])->val;
                 if (v != 0) {
                     tmp = do_constant_folding(s, opc, v, 0);
                     tcg_opt_gen_movi(s, op, op->args[0], tmp);
@@ -1162,18 +1162,18 @@
             goto do_default;
 
         CASE_OP_32_64(deposit):
-            if (arg_is_const(s, op->args[1]) && arg_is_const(s, op->args[2])) {
-                tmp = deposit64(arg_info(s, op->args[1])->val,
+            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+                tmp = deposit64(arg_info(op->args[1])->val,
                                 op->args[3], op->args[4],
-                                arg_info(s, op->args[2])->val);
+                                arg_info(op->args[2])->val);
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
 
         CASE_OP_32_64(extract):
-            if (arg_is_const(s, op->args[1])) {
-                tmp = extract64(arg_info(s, op->args[1])->val,
+            if (arg_is_const(op->args[1])) {
+                tmp = extract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
                 break;
@@ -1181,8 +1181,8 @@ void tcg_optimize(TCGContext *s)
 
         CASE_OP_32_64(sextract):
-            if (arg_is_const(s, op->args[1])) {
-                tmp = sextract64(arg_info(s, op->args[1])->val,
+            if (arg_is_const(op->args[1])) {
+                tmp = sextract64(arg_info(op->args[1])->val,
                                  op->args[2], op->args[3]);
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
                 break;
@@ -1220,9 +1220,9 @@
                 tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
                 break;
             }
-            if (arg_is_const(s, op->args[3]) && arg_is_const(s, op->args[4])) {
-                tcg_target_ulong tv = arg_info(s, op->args[3])->val;
-                tcg_target_ulong fv = arg_info(s, op->args[4])->val;
+            if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
+                tcg_target_ulong tv = arg_info(op->args[3])->val;
+                tcg_target_ulong fv = arg_info(op->args[4])->val;
                 TCGCond cond = op->args[5];
                 if (fv == 1 && tv == 0) {
                     cond = tcg_invert_cond(cond);
@@ -1239,12 +1239,12 @@
 
         case INDEX_op_add2_i32:
         case INDEX_op_sub2_i32:
-            if (arg_is_const(s, op->args[2]) && arg_is_const(s, op->args[3])
-                && arg_is_const(s, op->args[4]) && arg_is_const(s, op->args[5])) {
-                uint32_t al = arg_info(s, op->args[2])->val;
-                uint32_t ah = arg_info(s, op->args[3])->val;
-                uint32_t bl = arg_info(s, op->args[4])->val;
-                uint32_t bh = arg_info(s, op->args[5])->val;
+            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
+                && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
+                uint32_t al = arg_info(op->args[2])->val;
+                uint32_t ah = arg_info(op->args[3])->val;
+                uint32_t bl = arg_info(op->args[4])->val;
+                uint32_t bh = arg_info(op->args[5])->val;
                 uint64_t a = ((uint64_t)ah << 32) | al;
                 uint64_t b = ((uint64_t)bh << 32) | bl;
                 TCGArg rl, rh;
@@ -1268,9 +1268,9 @@
             goto do_default;
 
         case INDEX_op_mulu2_i32:
-            if (arg_is_const(s, op->args[2]) && arg_is_const(s, op->args[3])) {
-                uint32_t a = arg_info(s, op->args[2])->val;
-                uint32_t b = arg_info(s, op->args[3])->val;
+            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+                uint32_t a = arg_info(op->args[2])->val;
+                uint32_t b = arg_info(op->args[3])->val;
                 uint64_t r = (uint64_t)a * b;
                 TCGArg rl, rh;
                 TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);
@@ -1301,10 +1301,10 @@
                 }
             } else if ((op->args[4] == TCG_COND_LT
                         || op->args[4] == TCG_COND_GE)
-                       && arg_is_const(s, op->args[2])
-                       && arg_info(s, op->args[2])->val == 0
-                       && arg_is_const(s, op->args[3])
-                       && arg_info(s, op->args[3])->val == 0) {
+                       && arg_is_const(op->args[2])
+                       && arg_info(op->args[2])->val == 0
+                       && arg_is_const(op->args[3])
+                       && arg_info(op->args[3])->val == 0) {
                 /* Simplify LT/GE comparisons vs zero to a single compare
                    vs the high word of the input.  */
             do_brcond_high:
@@ -1372,15 +1372,15 @@
                 tcg_opt_gen_movi(s, op, op->args[0], tmp);
             } else if ((op->args[5] == TCG_COND_LT
                         || op->args[5] == TCG_COND_GE)
-                       && arg_is_const(s, op->args[3])
-                       && arg_info(s, op->args[3])->val == 0
-                       && arg_is_const(s, op->args[4])
-                       && arg_info(s, op->args[4])->val == 0) {
+                       && arg_is_const(op->args[3])
+                       && arg_info(op->args[3])->val == 0
+                       && arg_is_const(op->args[4])
+                       && arg_info(op->args[4])->val == 0) {
                 /* Simplify LT/GE comparisons vs zero to a single compare
                    vs the high word of the input.  */
             do_setcond_high:
                 reset_temp(s, op->args[0]);
-                arg_info(s, op->args[0])->mask = 1;
+                arg_info(op->args[0])->mask = 1;
                 op->opc = INDEX_op_setcond_i32;
                 op->args[1] = op->args[2];
                 op->args[2] = op->args[4];
@@ -1406,7 +1406,7 @@
             }
             do_setcond_low:
                 reset_temp(s, op->args[0]);
-                arg_info(s, op->args[0])->mask = 1;
+                arg_info(op->args[0])->mask = 1;
                 op->opc = INDEX_op_setcond_i32;
                 op->args[2] = op->args[3];
                 op->args[3] = op->args[5];
@@ -1462,7 +1462,7 @@
                 /* Save the corresponding known-zero bits mask for the
                    first output argument (only one supported so far). */
                 if (i == 0) {
-                    arg_info(s, op->args[i])->mask = mask;
+                    arg_info(op->args[i])->mask = mask;
                 }
             }
         }
diff --git a/qemu/tcg/tcg-op.c b/qemu/tcg/tcg-op.c
index 96b883b6..d83f3f8c 100644
--- a/qemu/tcg/tcg-op.c
+++ b/qemu/tcg/tcg-op.c
@@ -2471,7 +2471,7 @@ void tcg_gen_extrl_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg)
         tcg_gen_mov_i32(s, ret, TCGV_LOW(s, arg));
     } else if (TCG_TARGET_HAS_extrl_i64_i32) {
         tcg_gen_op2(s, INDEX_op_extrl_i64_i32,
-                    tcgv_i32_arg(ret), tcgv_i64_arg(arg));
+                    tcgv_i32_arg(s, ret), tcgv_i64_arg(s, arg));
     } else {
         tcg_gen_mov_i32(s, ret, (TCGv_i32)arg);
     }
@@ -2483,7 +2483,7 @@ void tcg_gen_extrh_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg)
         tcg_gen_mov_i32(s, ret, TCGV_HIGH(s, arg));
     } else if (TCG_TARGET_HAS_extrh_i64_i32) {
         tcg_gen_op2(s, INDEX_op_extrh_i64_i32,
-                    tcgv_i32_arg(ret), tcgv_i64_arg(arg));
+                    tcgv_i32_arg(s, ret), tcgv_i64_arg(s, arg));
     } else {
         TCGv_i64 t = tcg_temp_new_i64(s);
         tcg_gen_shri_i64(s, t, arg, 32);
@@ -2499,7 +2499,7 @@ void tcg_gen_extu_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg)
         tcg_gen_movi_i32(s, TCGV_HIGH(s, ret), 0);
     } else {
         tcg_gen_op2(s, INDEX_op_extu_i32_i64,
-                    tcgv_i64_arg(ret), tcgv_i32_arg(arg));
+                    tcgv_i64_arg(s, ret), tcgv_i32_arg(s, arg));
     }
 }
 
@@ -2510,7 +2510,7 @@ void tcg_gen_ext_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg)
         tcg_gen_sari_i32(s, TCGV_HIGH(s, ret), TCGV_LOW(s, ret), 31);
     } else {
         tcg_gen_op2(s, INDEX_op_ext_i32_i64,
-                    tcgv_i64_arg(ret), tcgv_i32_arg(arg));
+                    tcgv_i64_arg(s, ret), tcgv_i32_arg(s, arg));
     }
 }
 
@@ -2576,7 +2576,7 @@ void tcg_gen_lookup_and_goto_ptr(TCGContext *s)
     if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
         TCGv_ptr ptr = tcg_temp_new_ptr(s);
         gen_helper_lookup_tb_ptr(s, ptr, s->tcg_env);
-        tcg_gen_op1i(s, INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
+        tcg_gen_op1i(s, INDEX_op_goto_ptr, tcgv_ptr_arg(s, ptr));
         tcg_temp_free_ptr(s, ptr);
     } else {
         tcg_gen_exit_tb(s, 0);
@@ -2621,7 +2621,7 @@ static void gen_ldst_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val, TCGv addr,
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_op4i_i32(s, opc, val, TCGV_LOW(s, addr), TCGV_HIGH(s, addr), oi);
     } else {
-        tcg_gen_op3(s, opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
+        tcg_gen_op3(s, opc, tcgv_i32_arg(s, val), tcgv_i64_arg(s, addr), oi);
     }
 #endif
 }
@@ -2634,7 +2634,7 @@ static void gen_ldst_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 val, TCGv addr,
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_op4i_i32(s, opc, TCGV_LOW(s, val), TCGV_HIGH(s, val), addr, oi);
     } else {
-        tcg_gen_op3(s, opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
+        tcg_gen_op3(s, opc, tcgv_i64_arg(s, val), tcgv_i32_arg(s, addr), oi);
     }
 #else
     if (TCG_TARGET_REG_BITS == 32) {
diff --git a/qemu/tcg/tcg-op.h b/qemu/tcg/tcg-op.h
index 9d4afeb8..ebf33c01 100644
--- a/qemu/tcg/tcg-op.h
+++ b/qemu/tcg/tcg-op.h
@@ -47,12 +47,12 @@ static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t t
 
 static inline void tcg_gen_op1_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1)
 {
-    tcg_gen_op1(s, opc, tcgv_i32_arg(a1));
+    tcg_gen_op1(s, opc, tcgv_i32_arg(s, a1));
 }
 
 static inline void tcg_gen_op1_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1)
 {
-    tcg_gen_op1(s, opc, tcgv_i64_arg(a1));
+    tcg_gen_op1(s, opc, tcgv_i64_arg(s, a1));
 }
 
 static inline void tcg_gen_op1i(TCGContext *s, TCGOpcode opc, TCGArg a1)
@@ -62,22 +62,22 @@ static inline void tcg_gen_op1i(TCGContext *s, TCGOpcode opc, TCGArg a1)
 
 static inline void tcg_gen_op2_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
 {
-    tcg_gen_op2(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
+    tcg_gen_op2(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2));
 }
 
 static inline void tcg_gen_op2_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
 {
-    tcg_gen_op2(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
+    tcg_gen_op2(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2));
 }
 
 static inline void tcg_gen_op2i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
 {
-    tcg_gen_op2(s, opc, tcgv_i32_arg(a1), a2);
+    tcg_gen_op2(s, opc, tcgv_i32_arg(s, a1), a2);
 }
 
 static inline void tcg_gen_op2i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
 {
-    tcg_gen_op2(s, opc, tcgv_i64_arg(a1), a2);
+    tcg_gen_op2(s, opc, tcgv_i64_arg(s, a1), a2);
 }
 
 static inline void tcg_gen_op2ii(TCGContext *s, TCGOpcode opc, TCGArg a1, TCGArg a2)
@@ -88,173 +88,173 @@ static inline void tcg_gen_op2ii(TCGContext *s, TCGOpcode opc, TCGArg a1, TCGArg
 
 static inline void tcg_gen_op3_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                    TCGv_i32 a2, TCGv_i32 a3)
 {
-    tcg_gen_op3(s, opc, tcgv_i32_arg(a1),
-                tcgv_i32_arg(a2), tcgv_i32_arg(a3));
+    tcg_gen_op3(s, opc, tcgv_i32_arg(s, a1),
+                tcgv_i32_arg(s, a2), tcgv_i32_arg(s, a3));
 }
 
 static inline void tcg_gen_op3_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                    TCGv_i64 a2, TCGv_i64 a3)
 {
-    tcg_gen_op3(s, opc, tcgv_i64_arg(a1),
-                tcgv_i64_arg(a2), tcgv_i64_arg(a3));
+    tcg_gen_op3(s, opc, tcgv_i64_arg(s, a1),
+                tcgv_i64_arg(s, a2), tcgv_i64_arg(s, a3));
 }
 
 static inline void tcg_gen_op3i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                     TCGv_i32 a2, TCGArg a3)
 {
-    tcg_gen_op3(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
+    tcg_gen_op3(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2), a3);
 }
 
 static inline void tcg_gen_op3i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                     TCGv_i64 a2, TCGArg a3)
 {
-    tcg_gen_op3(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
+    tcg_gen_op3(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2), a3);
 }
 
 static inline void tcg_gen_ldst_op_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val,
                                        TCGv_ptr base, TCGArg offset)
 {
-    tcg_gen_op3(s, opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
+    tcg_gen_op3(s, opc, tcgv_i32_arg(s, val), tcgv_ptr_arg(s, base), offset);
 }
 
 static inline void tcg_gen_ldst_op_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 val,
                                        TCGv_ptr base, TCGArg offset)
 {
-    tcg_gen_op3(s, opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
+    tcg_gen_op3(s, opc, tcgv_i64_arg(s, val), tcgv_ptr_arg(s, base), offset);
 }
 
 static inline void tcg_gen_op4_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                    TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4));
+    tcg_gen_op4(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4));
 }
 
 static inline void tcg_gen_op4_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                    TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4));
+    tcg_gen_op4(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4));
 }
 
 static inline void tcg_gen_op4i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                     TCGv_i32 a2, TCGv_i32 a3, TCGArg a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), a4);
+    tcg_gen_op4(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), a4);
 }
 
 static inline void tcg_gen_op4i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                     TCGv_i64 a2, TCGv_i64 a3, TCGArg a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), a4);
+    tcg_gen_op4(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), a4);
 }
 
 static inline void tcg_gen_op4ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                      TCGv_i32 a2, TCGArg a3, TCGArg a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
+    tcg_gen_op4(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2), a3, a4);
 }
 
 static inline void tcg_gen_op4ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                      TCGv_i64 a2, TCGArg a3, TCGArg a4)
 {
-    tcg_gen_op4(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
+    tcg_gen_op4(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2), a3, a4);
 }
 
 static inline void tcg_gen_op5_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                    TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
+    tcg_gen_op5(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4), tcgv_i32_arg(s, a5));
 }
 
 static inline void tcg_gen_op5_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                    TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
+    tcg_gen_op5(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), tcgv_i64_arg(s, a5));
 }
 
 static inline void tcg_gen_op5i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                     TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
+    tcg_gen_op5(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4), a5);
 }
 
 static inline void tcg_gen_op5i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                     TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
+    tcg_gen_op5(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), a5);
 }
 
 static inline void tcg_gen_op5ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                      TCGv_i32 a2, TCGv_i32 a3, TCGArg a4, TCGArg a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), a4, a5);
+    tcg_gen_op5(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), a4, a5);
 }
 
 static inline void tcg_gen_op5ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                      TCGv_i64 a2, TCGv_i64 a3, TCGArg a4, TCGArg a5)
 {
-    tcg_gen_op5(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), a4, a5);
+    tcg_gen_op5(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), a4, a5);
 }
 
 static inline void tcg_gen_op6_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                    TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4,
                                    TCGv_i32 a5, TCGv_i32 a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
-                tcgv_i32_arg(a6));
+    tcg_gen_op6(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4), tcgv_i32_arg(s, a5),
+                tcgv_i32_arg(s, a6));
 }
 
 static inline void tcg_gen_op6_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                    TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4,
                                    TCGv_i64 a5, TCGv_i64 a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
-                tcgv_i64_arg(a6));
+    tcg_gen_op6(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), tcgv_i64_arg(s, a5),
+                tcgv_i64_arg(s, a6));
 }
 
 static inline void tcg_gen_op6i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                     TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4,
                                     TCGv_i32 a5, TCGArg a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
+    tcg_gen_op6(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4), tcgv_i32_arg(s, a5), a6);
 }
 
 static inline void tcg_gen_op6i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                     TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4,
                                     TCGv_i64 a5, TCGArg a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
+    tcg_gen_op6(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), tcgv_i64_arg(s, a5), a6);
 }
 
 static inline void tcg_gen_op6ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 a1,
                                      TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4,
                                      TCGArg a5, TCGArg a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
+    tcg_gen_op6(s, opc, tcgv_i32_arg(s, a1), tcgv_i32_arg(s, a2),
+                tcgv_i32_arg(s, a3), tcgv_i32_arg(s, a4), a5, a6);
 }
 
 static inline void tcg_gen_op6ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
                                      TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4,
                                      TCGArg a5, TCGArg a6)
 {
-    tcg_gen_op6(s, opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
+    tcg_gen_op6(s, opc, tcgv_i64_arg(s, a1), tcgv_i64_arg(s, a2),
+                tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), a5, a6);
 }
 
 /* Generic ops.  */
diff --git a/qemu/tcg/tcg.c b/qemu/tcg/tcg.c
index 78177356..5ced8612 100644
--- a/qemu/tcg/tcg.c
+++ b/qemu/tcg/tcg.c
@@ -1040,21 +1040,21 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGTemp *ret, int nargs, TCGTemp *
             op->args[pi++] = tcgv_i64_arg(retl);
             nb_rets = 2;
         } else {
-            op->args[pi++] = temp_arg(s, ret);
+            op->args[pi++] = temp_arg(ret);
             nb_rets = 1;
         }
 #else
         if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
 #ifdef HOST_WORDS_BIGENDIAN
-            op->args[pi++] = temp_arg(s, ret + 1);
-            op->args[pi++] = temp_arg(s, ret);
+            op->args[pi++] = temp_arg(ret + 1);
+            op->args[pi++] = temp_arg(ret);
 #else
-            op->args[pi++] = temp_arg(s, ret);
-            op->args[pi++] = temp_arg(s, ret + 1);
+            op->args[pi++] = temp_arg(ret);
+            op->args[pi++] = temp_arg(ret + 1);
 #endif
             nb_rets = 2;
         } else {
-            op->args[pi++] = temp_arg(s, ret);
+            op->args[pi++] = temp_arg(ret);
             nb_rets = 1;
         }
 #endif
@@ -1085,17 +1085,17 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGTemp *ret, int nargs, TCGTemp *
                have to get more complicated to differentiate between
                stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-            op->args[pi++] = temp_arg(s, args[i] + 1);
-            op->args[pi++] = temp_arg(s, args[i]);
+            op->args[pi++] = temp_arg(args[i] + 1);
+            op->args[pi++] = temp_arg(args[i]);
 #else
-            op->args[pi++] = temp_arg(s, args[i]);
-            op->args[pi++] = temp_arg(s, args[i] + 1);
+            op->args[pi++] = temp_arg(args[i]);
+            op->args[pi++] = temp_arg(args[i] + 1);
 #endif
             real_args += 2;
             continue;
         }
 
-        op->args[pi++] = temp_arg(s, args[i]);
+        op->args[pi++] = temp_arg(args[i]);
         real_args++;
     }
     op->args[pi++] = (uintptr_t)func;
@@ -1171,7 +1171,7 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
 
 static char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGArg arg)
 {
-    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(s, arg));
+    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
 }
 
 /* Find helper name.  */
@@ -1731,7 +1731,7 @@ static void liveness_pass_1(TCGContext *s)
                 /* pure functions can be removed if their result is unused */
                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                     for (i = 0; i < nb_oargs; i++) {
-                        arg_ts = arg_temp(s, op->args[i]);
+                        arg_ts = arg_temp(op->args[i]);
                         if (arg_ts->state != TS_DEAD) {
                             goto do_not_remove_call;
                         }
@@ -1742,7 +1742,7 @@ static void liveness_pass_1(TCGContext *s)
 
                     /* output args are dead */
                     for (i = 0; i < nb_oargs; i++) {
-                        arg_ts = arg_temp(s, op->args[i]);
+                        arg_ts = arg_temp(op->args[i]);
                         if (arg_ts->state & TS_DEAD) {
                             arg_life |= DEAD_ARG << i;
                         }
@@ -1767,14 +1767,14 @@ static void liveness_pass_1(TCGContext *s)
 
                 /* record arguments that die in this helper */
                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
-                    arg_ts = arg_temp(s, op->args[i]);
+                    arg_ts = arg_temp(op->args[i]);
                     if (arg_ts && arg_ts->state & TS_DEAD) {
                         arg_life |= DEAD_ARG << i;
                     }
                 }
                 /* input arguments are live for preceding opcodes */
                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
-                    arg_ts = arg_temp(s, op->args[i]);
+                    arg_ts = arg_temp(op->args[i]);
                     if (arg_ts) {
                         arg_ts->state &= ~TS_DEAD;
                     }
@@ -1786,7 +1786,7 @@ static void liveness_pass_1(TCGContext *s)
             break;
         case INDEX_op_discard:
             /* mark the temporary as dead */
-            arg_temp(s, op->args[0])->state = TS_DEAD;
+            arg_temp(op->args[0])->state = TS_DEAD;
             break;
 
         case INDEX_op_add2_i32:
@@ -1807,8 +1807,8 @@ static void liveness_pass_1(TCGContext *s)
                the low part.  The result can be optimized to a simple
                add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
-            if (arg_temp(s, op->args[1])->state == TS_DEAD) {
-                if (arg_temp(s, op->args[0])->state == TS_DEAD) {
+            if (arg_temp(op->args[1])->state == TS_DEAD) {
+                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
@@ -1845,8 +1845,8 @@ static void liveness_pass_1(TCGContext *s)
         do_mul2:
             nb_iargs = 2;
             nb_oargs = 2;
-            if (arg_temp(s, op->args[1])->state == TS_DEAD) {
-                if (arg_temp(s, op->args[0])->state == TS_DEAD) {
+            if (arg_temp(op->args[1])->state == TS_DEAD) {
+                if (arg_temp(op->args[0])->state == TS_DEAD) {
                     /* Both parts of the operation are dead.  */
                     goto do_remove;
                 }
@@ -1854,7 +1854,7 @@ static void liveness_pass_1(TCGContext *s)
                 op->opc = opc = opc_new;
                 op->args[1] = op->args[2];
                 op->args[2] = op->args[3];
-            } else if (arg_temp(s, op->args[0])->state == TS_DEAD && have_opc_new2) {
+            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                 /* The low part of the operation is dead; generate the high. */
                 op->opc = opc = opc_new2;
                 op->args[0] = op->args[1];
@@ -1877,7 +1877,7 @@ static void liveness_pass_1(TCGContext *s)
                implies side effects */
             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                 for (i = 0; i < nb_oargs; i++) {
-                    if (arg_temp(s, op->args[i])->state != TS_DEAD) {
+                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                         goto do_not_remove;
                     }
                 }
@@ -1887,7 +1887,7 @@ static void liveness_pass_1(TCGContext *s)
         do_not_remove:
             /* output args are dead */
             for (i = 0; i < nb_oargs; i++) {
-                arg_ts = arg_temp(s, op->args[i]);
+                arg_ts = arg_temp(op->args[i]);
                 if (arg_ts->state & TS_DEAD) {
                     arg_life |= DEAD_ARG << i;
                 }
@@ -1922,14 +1922,14 @@ static void liveness_pass_1(TCGContext *s)
 
             /* record arguments that die in this opcode */
             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-                arg_ts = arg_temp(s, op->args[i]);
+                arg_ts = arg_temp(op->args[i]);
                 if (arg_ts->state & TS_DEAD) {
                     arg_life |= DEAD_ARG << i;
                 }
             }
             /* input arguments are live for preceding opcodes */
             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
-                arg_temp(s, op->args[i])->state &= ~TS_DEAD;
+                arg_temp(op->args[i])->state &= ~TS_DEAD;
             }
         }
         break;
@@ -1999,7 +1999,7 @@ static bool liveness_pass_2(TCGContext *s)
 
         /* Make sure that input arguments are available.  */
         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
-            arg_ts = arg_temp(s, op->args[i]);
+            arg_ts = arg_temp(op->args[i]);
             if (arg_ts) {
                 dir_ts = arg_ts->state_ptr;
                 if (dir_ts && arg_ts->state == TS_DEAD) {
@@ -2008,8 +2008,8 @@ static bool liveness_pass_2(TCGContext *s)
                                       : INDEX_op_ld_i64);
                     TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
 
-                    lop->args[0] = temp_arg(s, dir_ts);
-                    lop->args[1] = temp_arg(s, arg_ts->mem_base);
+                    lop->args[0] = temp_arg(dir_ts);
+                    lop->args[1] = temp_arg(arg_ts->mem_base);
                     lop->args[2] = arg_ts->mem_offset;
 
                     /* Loaded, but synced with memory.  */
@@ -2022,11 +2022,11 @@ static bool liveness_pass_2(TCGContext *s)
            No action is required except keeping temp_state up to date
           so that we reload when needed.  */
         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
-            arg_ts = arg_temp(s, op->args[i]);
+            arg_ts = arg_temp(op->args[i]);
             if (arg_ts) {
                 dir_ts = arg_ts->state_ptr;
                 if (dir_ts) {
-                    op->args[i] = temp_arg(s, dir_ts);
+                    op->args[i] = temp_arg(dir_ts);
                     changes = true;
                     if (IS_DEAD_ARG(i)) {
                         arg_ts->state = TS_DEAD;
@@ -2059,12 +2059,12 @@ static bool liveness_pass_2(TCGContext *s)
 
             /* Outputs become available.  */
             for (i = 0; i < nb_oargs; i++) {
-                arg_ts = arg_temp(s, op->args[i]);
+                arg_ts = arg_temp(op->args[i]);
                 dir_ts = arg_ts->state_ptr;
                 if (!dir_ts) {
                     continue;
                 }
-                op->args[i] = temp_arg(s, dir_ts);
+                op->args[i] = temp_arg(dir_ts);
                 changes = true;
 
                 /* The output is now live and modified.  */
@@ -2077,8 +2077,8 @@ static bool liveness_pass_2(TCGContext *s)
                                       : INDEX_op_st_i64);
                     TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
 
-                    sop->args[0] = temp_arg(s, dir_ts);
-                    sop->args[1] = temp_arg(s, arg_ts->mem_base);
+                    sop->args[0] = temp_arg(dir_ts);
+                    sop->args[1] = temp_arg(arg_ts->mem_base);
                     sop->args[2] = arg_ts->mem_offset;
 
                     arg_ts->state = TS_MEM;
@@ -2403,7 +2403,7 @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
 
 static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
 {
-    TCGTemp *ots = arg_temp(s, op->args[0]);
+    TCGTemp *ots = arg_temp(op->args[0]);
     tcg_target_ulong val = op->args[1];
 
     tcg_reg_alloc_do_movi(s, ots, val, op->life);
@@ -2417,8 +2417,8 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
     TCGType otype, itype;
 
     allocated_regs = s->reserved_regs;
-    ots = arg_temp(s, op->args[0]);
-    ts = arg_temp(s, op->args[1]);
+    ots = arg_temp(op->args[0]);
+    ts = arg_temp(op->args[1]);
 
     /* Note that otype != itype for no-op truncation.  */
     otype = ots->type;
@@ -2512,7 +2512,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         i = def->sorted_args[nb_oargs + k];
         arg = op->args[i];
         arg_ct = &def->args_ct[i];
-        ts = arg_temp(s, arg);
+        ts = arg_temp(arg);
 
         if (ts->val_type == TEMP_VAL_CONST
             && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
@@ -2569,7 +2569,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     /* mark dead temporaries and free the associated registers */
     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
         if (IS_DEAD_ARG(i)) {
-            temp_dead(s, arg_temp(s, op->args[i]));
+            temp_dead(s, arg_temp(op->args[i]));
         }
     }
 
@@ -2595,7 +2595,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
             i = def->sorted_args[k];
             arg = op->args[i];
             arg_ct = &def->args_ct[i];
-            ts = arg_temp(s, arg);
+            ts = arg_temp(arg);
             if ((arg_ct->ct & TCG_CT_ALIAS)
                 && !const_args[arg_ct->alias_index]) {
                 reg = new_args[arg_ct->alias_index];
@@ -2636,7 +2636,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 
     /* move the outputs in the correct register if needed */
     for(i = 0; i < nb_oargs; i++) {
-        ts = arg_temp(s, op->args[i]);
+        ts = arg_temp(op->args[i]);
         reg = new_args[i];
         if (ts->fixed_reg && ts->reg != reg) {
             tcg_out_mov(s, ts->type, ts->reg, reg);
@@ -2700,7 +2700,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         stack_offset -= sizeof(tcg_target_long);
 #endif
         if (arg != TCG_CALL_DUMMY_ARG) {
-            ts = arg_temp(s, arg);
+            ts = arg_temp(arg);
             temp_load(s, ts, s->tcg_target_available_regs[ts->type],
                       s->reserved_regs);
             tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
@@ -2715,7 +2715,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
     for (i = 0; i < nb_regs; i++) {
         arg = op->args[nb_oargs + i];
         if (arg != TCG_CALL_DUMMY_ARG) {
-            ts = arg_temp(s, arg);
+            ts = arg_temp(arg);
             reg = tcg_target_call_iarg_regs[i];
             tcg_reg_free(s, reg, allocated_regs);
 
@@ -2737,7 +2737,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
     /* mark dead temporaries and free the associated registers */
     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
         if (IS_DEAD_ARG(i)) {
-            temp_dead(s, arg_temp(s, op->args[i]));
+            temp_dead(s, arg_temp(op->args[i]));
        }
    }
 
@@ -2763,7 +2763,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
     /* assign output registers and emit moves if needed */
     for(i = 0; i < nb_oargs; i++) {
         arg = op->args[i];
-        ts = arg_temp(s, arg);
+        ts = arg_temp(arg);
         reg = tcg_target_call_oarg_regs[i];
         tcg_debug_assert(s->reg_to_temp[reg] == NULL);
 
@@ -2922,7 +2922,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
             }
             break;
         case INDEX_op_discard:
-            temp_dead(s, arg_temp(s, op->args[0]));
+            temp_dead(s, arg_temp(op->args[0]));
             break;
         case INDEX_op_set_label:
             tcg_reg_alloc_bb_end(s, s->reserved_regs);
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index 6d257ccc..df427c1b 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -431,14 +431,14 @@ typedef TCGv_ptr TCGv_env;
 #error Unhandled TARGET_LONG_BITS value
 #endif
 
-/* Dummy definition to avoid compiler warnings.  */
-#define TCGV_UNUSED_I32(x) (x = (TCGv_i32)-1)
-#define TCGV_UNUSED_I64(x) (x = (TCGv_i64)-1)
-#define TCGV_UNUSED_PTR(x) (x = (TCGv_ptr)-1)
+/* See the comment before tcgv_i32_temp.  */
+#define TCGV_UNUSED_I32(x) (x = (TCGv_i32)NULL)
+#define TCGV_UNUSED_I64(x) (x = (TCGv_i64)NULL)
+#define TCGV_UNUSED_PTR(x) (x = (TCGv_ptr)NULL)
 
-#define TCGV_IS_UNUSED_I32(x) ((x) == (TCGv_i32)-1)
-#define TCGV_IS_UNUSED_I64(x) ((x) == (TCGv_i64)-1)
-#define TCGV_IS_UNUSED_PTR(x) ((x) == (TCGv_ptr)-1)
+#define TCGV_IS_UNUSED_I32(x) ((x) == (TCGv_i32)NULL)
+#define TCGV_IS_UNUSED_I64(x) ((x) == (TCGv_i64)NULL)
+#define TCGV_IS_UNUSED_PTR(x) ((x) == (TCGv_ptr)NULL)
 
 /* call flags */
 /* Helper does not read globals (either directly or through an exception). It
@@ -456,8 +456,8 @@ typedef TCGv_ptr TCGv_env;
 #define TCG_CALL_NO_RWG_SE  (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
 #define TCG_CALL_NO_WG_SE   (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
 
-/* used to align parameters */
-#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1))
+/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
+#define TCG_CALL_DUMMY_ARG ((TCGArg)0)
 
 /* Conditions.  Note that these are laid out for easy manipulation by
    the functions below:
@@ -912,64 +912,66 @@ static inline size_t temp_idx(TCGContext *tcg_ctx, TCGTemp *ts)
     return n;
 }
 
-static inline TCGArg temp_arg(TCGContext *tcg_ctx, TCGTemp *ts)
+static inline TCGArg temp_arg(TCGTemp *ts)
 {
-    return temp_idx(tcg_ctx, ts);
+    return (uintptr_t)ts;
 }
 
-static inline TCGTemp *arg_temp(TCGContext *tcg_ctx, TCGArg a)
+static inline TCGTemp *arg_temp(TCGArg a)
 {
-    return a == TCG_CALL_DUMMY_ARG ? NULL : &tcg_ctx->temps[a];
+    return (TCGTemp *)(uintptr_t)a;
 }
 
-static inline size_t arg_index(TCGArg a)
+/* Using the offset of a temporary, relative to TCGContext, rather than
+   its index means that we don't use 0.  That leaves offset 0 free for
+   a NULL representation without having to leave index 0 unused.  */
+static inline TCGTemp *tcgv_i32_temp(TCGContext *s, TCGv_i32 v)
 {
-    return a;
+    uintptr_t o = (uintptr_t)v;
+    TCGTemp *t = (void *)s + o;
+    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(s, t)]) == o);
+    return t;
 }
 
-static inline TCGArg tcgv_i32_arg(TCGv_i32 t)
+static inline TCGTemp *tcgv_i64_temp(TCGContext *s, TCGv_i64 v)
 {
-    return (intptr_t)t;
+    return tcgv_i32_temp(s, (TCGv_i32)v);
 }
 
-static inline TCGArg tcgv_i64_arg(TCGv_i64 t)
+static inline TCGTemp *tcgv_ptr_temp(TCGContext *s, TCGv_ptr v)
 {
-    return (intptr_t)t;
+    return tcgv_i32_temp(s, (TCGv_i32)v);
 }
 
-static inline TCGArg tcgv_ptr_arg(TCGv_ptr t)
+static inline TCGArg tcgv_i32_arg(TCGContext *s, TCGv_i32 v)
 {
-    return (intptr_t)t;
+    return temp_arg(tcgv_i32_temp(s, v));
 }
 
-static inline TCGTemp *tcgv_i32_temp(TCGContext *s, TCGv_i32 t)
+static inline TCGArg tcgv_i64_arg(TCGContext *s, TCGv_i64 v)
 {
-    return arg_temp(s, tcgv_i32_arg(t));
+    return temp_arg(tcgv_i64_temp(s, v));
 }
 
-static inline TCGTemp *tcgv_i64_temp(TCGContext *s, TCGv_i64 t)
+static inline TCGArg tcgv_ptr_arg(TCGContext *s, TCGv_ptr v)
 {
-    return arg_temp(s, tcgv_i64_arg(t));
-}
-
-static inline TCGTemp *tcgv_ptr_temp(TCGContext *s, TCGv_ptr t)
-{
-    return arg_temp(s, tcgv_ptr_arg(t));
+    return temp_arg(tcgv_ptr_temp(s, v));
 }
 
 static inline TCGv_i32 temp_tcgv_i32(TCGContext *s, TCGTemp *t)
 {
-    return (TCGv_i32)temp_idx(s, t);
+    (void)temp_idx(s, t); /* trigger embedded assert */
+    return (TCGv_i32)((void *)t - (void *)s);
 }
 
 static inline TCGv_i64 temp_tcgv_i64(TCGContext *s, TCGTemp *t)
 {
-    return (TCGv_i64)temp_idx(s, t);
+    return (TCGv_i64)temp_tcgv_i32(s, t);
 }
 
 static inline TCGv_ptr temp_tcgv_ptr(TCGContext *s, TCGTemp *t)
 {
-    return (TCGv_ptr)temp_idx(s, t);
+    return (TCGv_ptr)temp_tcgv_i32(s, t);
 }
 
 #if TCG_TARGET_REG_BITS == 32
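Note for reviewers (not part of the patch): the heart of this change is the new encoding in
tcg.h. A TCGv_i32/_i64/_ptr handle now carries the byte offset of its TCGTemp within
TCGContext (a temp can never sit at offset 0, so NULL is free to mean "unused"), while a
TCGArg carries the TCGTemp pointer itself. That is why arg_temp()/temp_arg() drop the
TCGContext parameter, why tcgv_*_arg() gain one, and why TCGV_UNUSED_* and
TCG_CALL_DUMMY_ARG move from -1 to NULL/0. The sketch below models that round trip in
isolation; Ctx, Temp, TV32 and Arg are simplified stand-ins invented for this note, not the
real QEMU types, and the tcg_debug_assert offset check is omitted.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for TCGContext/TCGTemp/TCGv_i32/TCGArg. */
typedef struct Temp { int reg; } Temp;
typedef struct Ctx  { int other_state; Temp temps[8]; } Ctx;
typedef struct TV32_s *TV32;   /* opaque handle, like TCGv_i32 */
typedef uintptr_t Arg;         /* like TCGArg */

/* temp -> handle: byte offset of the temp inside the context.
   A temp always sits at a nonzero offset, so 0 can encode "unused". */
static TV32 temp_tv32(Ctx *s, Temp *t)
{
    return (TV32)(uintptr_t)((char *)t - (char *)s);
}

/* handle -> temp: add the offset back onto the context base. */
static Temp *tv32_temp(Ctx *s, TV32 v)
{
    return (Temp *)((char *)s + (uintptr_t)v);
}

/* temp <-> op argument: a plain pointer round trip, no context needed;
   a dummy arg of 0 naturally maps back to NULL. */
static Arg   temp_arg(Temp *t) { return (uintptr_t)t; }
static Temp *arg_temp(Arg a)   { return (Temp *)a; }

int main(void)
{
    static Ctx s;
    TV32 v = temp_tv32(&s, &s.temps[3]);   /* handle = offset */
    Arg  a = temp_arg(tv32_temp(&s, v));   /* arg = pointer */

    assert(arg_temp(a) == &s.temps[3]);    /* lossless round trip */
    assert((uintptr_t)v != 0);             /* 0 stays free for "unused" */
    printf("temps[3] sits at offset %zu\n", (size_t)(uintptr_t)v);
    return 0;
}

This also shows why the dummy argument has to become 0: arg_temp() is now a straight
cast, so only a NULL-encoding dummy can round-trip without producing a wild pointer.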