tcg: rename trunc_shr_i32 into trunc_shr_i64_i32
The op is sometimes named trunc_shr_i32 and sometimes trunc_shr_i64_i32, and the name in the README doesn't match the name offered to the frontends. Always use the long name to make it clear it is a size-changing op.

Backports commit 0632e555fc4d281d69cb08d98d500d96185b041f from qemu
parent 5f0920ad0f
commit 80223e7ad5
@@ -314,7 +314,7 @@ This operation would be equivalent to

   dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00)

-* trunc_shr_i32 t0, t1, pos
+* trunc_shr_i64_i32 t0, t1, pos

 For 64-bit hosts only, right shift the 64-bit input T1 by POS and
 truncate to 32-bit output T0. Depending on the host, this may be
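As a minimal standalone illustration of the semantics the README describes (shift the 64-bit input right by POS, then truncate to 32 bits); this is a sketch, not QEMU code:

/* Standalone sketch of the documented semantics of
 * trunc_shr_i64_i32 t0, t1, pos. Not QEMU code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t trunc_shr_i64_i32(uint64_t t1, unsigned pos)
{
    return (uint32_t)(t1 >> pos);   /* pos is expected to be < 64 */
}

int main(void)
{
    uint64_t t1 = 0x1122334455667788ULL;
    /* pos == 32 extracts the high half; pos == 0 is a plain truncation. */
    printf("%08x %08x\n", trunc_shr_i64_i32(t1, 32), trunc_shr_i64_i32(t1, 0));
    return 0;   /* prints: 11223344 55667788 */
}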
@@ -69,7 +69,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 0

 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
@@ -101,7 +101,7 @@ extern bool have_bmi1;
 #define TCG_TARGET_HAS_mulsh_i32 0

 #if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_ext8s_i64 1
@@ -293,7 +293,7 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     case INDEX_op_shr_i32:
         return (uint32_t)x >> (y & 31);

-    case INDEX_op_trunc_shr_i32:
+    case INDEX_op_trunc_shr_i64_i32:
     case INDEX_op_shr_i64:
         return (uint64_t)x >> (y & 63);

@@ -886,7 +886,7 @@ void tcg_optimize(TCGContext *s)
            }
            break;

-        case INDEX_op_trunc_shr_i32:
+        case INDEX_op_trunc_shr_i64_i32:
            mask = (uint64_t)temps[args[1]].mask >> args[2];
            break;

@@ -1034,7 +1034,7 @@ void tcg_optimize(TCGContext *s)
            }
            goto do_default;

-        case INDEX_op_trunc_shr_i32:
+        case INDEX_op_trunc_shr_i64_i32:
            if (temp_is_const(s, args[1])) {
                tmp = do_constant_folding(s, opc, temps[args[1]].val, args[2]);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
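A standalone sketch of what constant folding can do with this op when both the 64-bit source and the shift count are known at translate time: the same shift as the i64 case above, followed by a truncation to the op's 32-bit result width. The helper name is hypothetical; this is not the QEMU code itself:

/* Sketch: fold a constant trunc_shr_i64_i32 into a single value,
 * which the optimizer would then emit as a movi. Not QEMU code. */
#include <stdint.h>
#include <assert.h>

static uint64_t fold_trunc_shr_i64_i32(uint64_t x, uint64_t y)
{
    uint64_t res = x >> (y & 63);  /* same shift as the shr_i64 case */
    return res & 0xffffffffu;      /* the op produces a 32-bit value */
}

int main(void)
{
    assert(fold_trunc_shr_i64_i32(0xdeadbeefcafebabeULL, 32) == 0xdeadbeefu);
    return 0;
}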
@@ -76,7 +76,7 @@ typedef enum {
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32 0
 #define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 1
@@ -71,7 +71,7 @@ typedef enum TCGReg {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 0

 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
@@ -1413,7 +1413,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32u_i64:
         tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
         break;
-    case INDEX_op_trunc_shr_i32:
+    case INDEX_op_trunc_shr_i64_i32:
         if (a2 == 0) {
             tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
         } else {
@@ -1533,7 +1533,7 @@ static const TCGTargetOpDef sparc_op_defs[] = {

     { INDEX_op_ext32s_i64, { "R", "r" } },
     { INDEX_op_ext32u_i64, { "R", "r" } },
-    { INDEX_op_trunc_shr_i32, { "r", "R" } },
+    { INDEX_op_trunc_shr_i64_i32, { "r", "R" } },

     { INDEX_op_brcond_i64, { "RZ", "RJ" } },
     { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
@@ -117,7 +117,7 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0

-#define TCG_TARGET_HAS_trunc_shr_i32 1
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 1
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 0
@@ -1752,8 +1752,8 @@ void tcg_gen_trunc_shr_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg, unsign
             tcg_gen_mov_i32(s, ret, TCGV_LOW(t));
             tcg_temp_free_i64(s, t);
         }
-    } else if (TCG_TARGET_HAS_trunc_shr_i32) {
-        tcg_gen_op3i_i32(s, INDEX_op_trunc_shr_i32, ret,
+    } else if (TCG_TARGET_HAS_trunc_shr_i64_i32) {
+        tcg_gen_op3i_i32(s, INDEX_op_trunc_shr_i64_i32, ret,
                          MAKE_TCGV_I32(GET_TCGV_I64(arg)), count);
     } else if (count == 0) {
         tcg_gen_mov_i32(s, ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
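A standalone sketch (hypothetical helper, not the real tcg_gen_* code) of the branch structure visible in this hunk: whether the backend provides the dedicated op, the count is zero, or a generic shift-then-move fallback is used, the computed 32-bit value is the same; only the emitted TCG ops would differ:

/* Sketch of the expansion strategy, with TCG values replaced by plain
 * integers. Assumption for illustration: the backend lacks the op. */
#include <stdint.h>

enum { HAS_TRUNC_SHR_I64_I32 = 0 };

static uint32_t expand_trunc_shr_i64_i32(uint64_t arg, unsigned count /* < 64 */)
{
    if (HAS_TRUNC_SHR_I64_I32) {
        /* Backend op: shift and truncate in one operation. */
        return (uint32_t)(arg >> count);
    } else if (count == 0) {
        /* Degenerates to a plain truncating move. */
        return (uint32_t)arg;
    } else {
        /* Generic fallback: 64-bit shift into a temporary, then move
         * its low 32 bits into the result. */
        uint64_t t = arg >> count;
        return (uint32_t)t;
    }
}

int main(void)
{
    /* 0x00000001ffffffff >> 1, truncated to 32 bits, is 0xffffffff. */
    return expand_trunc_shr_i64_i32(0x00000001ffffffffULL, 1) == 0xffffffffu ? 0 : 1;
}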
@@ -143,8 +143,8 @@ DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
 DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
 DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))

-DEF(trunc_shr_i32, 1, 1, 1,
-    IMPL(TCG_TARGET_HAS_trunc_shr_i32)
+DEF(trunc_shr_i64_i32, 1, 1, 1,
+    IMPL(TCG_TARGET_HAS_trunc_shr_i64_i32)
     | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))

 DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
@@ -67,7 +67,7 @@ typedef uint64_t TCGRegSet;

 #if TCG_TARGET_REG_BITS == 32
 /* Turn some undef macros into false macros. */
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
 #define TCG_TARGET_HAS_div_i64 0
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_div2_i64 0