tcg: Convert tcg_gen_dupi_vec to TCG_CONST

Because we now store uint64_t in TCGTemp, we can now always
store the full 64-bit duplicate immediate. So remove the
difference between 32- and 64-bit hosts.

Backports 0b4286dd15e2bcaf2aa53dfac0fb3103690f5a34
Author: Richard Henderson, 2021-03-04 12:18:57 -05:00 (committed by Lioncash)
parent 541ef541ae
commit 6e54b46d28
3 changed files with 15 additions and 40 deletions
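
For context, a "duplicate immediate" is a scalar constant replicated into every element of a vector. TCG's dup_const() helper expands a value of element size vece into the equivalent 64-bit pattern; the sketch below is a standalone illustration of that behaviour (not the QEMU implementation), which the hunks below rely on.

#include <stdint.h>

/* Element sizes, mirroring TCG's MemOp encoding (log2 of the byte width). */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };

/* Replicate the low (8 << vece) bits of 'c' across a 64-bit word,
 * e.g. vece = MO_16, c = 0x1234 gives 0x1234123412341234. */
static uint64_t dup_const_sketch(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:  return (c & 0xff)       * 0x0101010101010101ull;
    case MO_16: return (c & 0xffff)     * 0x0001000100010001ull;
    case MO_32: return (c & 0xffffffff) * 0x0000000100000001ull;
    default:    return c;
    }
}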

@@ -1123,11 +1123,10 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_dup2_vec:
             assert(TCG_TARGET_REG_BITS == 32);
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[1])->val;
-                if (tmp == arg_info(op->args[2])->val) {
-                    tcg_opt_gen_movi(s, temps_used, op, op->args[0], tmp);
-                    break;
-                }
+                tcg_opt_gen_movi(s, temps_used, op, op->args[0],
+                                 deposit64(arg_info(op->args[1])->val, 32, 32,
+                                           arg_info(op->args[2])->val));
+                break;
             } else if (args_are_copies(op->args[1], op->args[2])) {
                 op->opc = INDEX_op_dup_vec;
                 TCGOP_VECE(op) = MO_32;
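
In other words, when both inputs of a 32-bit-host dup2_vec are constants, the optimizer now always folds them into one 64-bit immediate: args[1] supplies the low half and args[2] the high half. A minimal sketch of what deposit64(lo, 32, 32, hi) computes in this call (the helper name below is illustrative):

#include <stdint.h>

/* The (start = 32, len = 32) case of deposit64 used above: insert 'hi'
 * into bits [63:32] while keeping bits [31:0] from 'lo'. */
static uint64_t fold_dup2_sketch(uint32_t lo, uint32_t hi)
{
    return (uint64_t)lo | ((uint64_t)hi << 32);
}

/* e.g. fold_dup2_sketch(0x11111111, 0x22222222) == 0x2222222211111111 */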

@@ -218,25 +218,17 @@ void tcg_gen_mov_vec(TCGContext *s, TCGv_vec r, TCGv_vec a)
     }
 }
 
-#define MO_REG (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)
-
-static void do_dupi_vec(TCGContext *s, TCGv_vec r, unsigned vece, TCGArg a)
-{
-    TCGTemp *rt = tcgv_vec_temp(s, r);
-    vec_gen_2(s, INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
-}
-
 TCGv_vec tcg_const_zeros_vec(TCGContext *s, TCGType type)
 {
     TCGv_vec ret = tcg_temp_new_vec(s, type);
-    do_dupi_vec(s, ret, MO_REG, 0);
+    tcg_gen_dupi_vec(s, MO_64, ret, 0);
     return ret;
 }
 
 TCGv_vec tcg_const_ones_vec(TCGContext *s, TCGType type)
 {
     TCGv_vec ret = tcg_temp_new_vec(s, type);
-    do_dupi_vec(s, ret, MO_REG, -1);
+    tcg_gen_dupi_vec(s, MO_64, ret, -1);
     return ret;
 }
@@ -254,39 +246,28 @@ TCGv_vec tcg_const_ones_vec_matching(TCGContext *s, TCGv_vec m)
 
 void tcg_gen_dup64i_vec(TCGContext *s, TCGv_vec r, uint64_t a)
 {
-    if (TCG_TARGET_REG_BITS == 64) {
-        do_dupi_vec(s, r, MO_64, a);
-    } else if (a == dup_const(MO_32, a)) {
-        do_dupi_vec(s, r, MO_32, a);
-    } else {
-        TCGv_i64 c = tcg_const_i64(s, a);
-        tcg_gen_dup_i64_vec(s, MO_64, r, c);
-        tcg_temp_free_i64(s, c);
-    }
+    tcg_gen_dupi_vec(s, MO_64, r, a);
 }
 
 void tcg_gen_dup32i_vec(TCGContext *s, TCGv_vec r, uint32_t a)
 {
-    do_dupi_vec(s, r, MO_REG, dup_const(MO_32, a));
+    tcg_gen_dupi_vec(s, MO_32, r, a);
 }
 
 void tcg_gen_dup16i_vec(TCGContext *s, TCGv_vec r, uint32_t a)
 {
-    do_dupi_vec(s, r, MO_REG, dup_const(MO_16, a));
+    tcg_gen_dupi_vec(s, MO_16, r, a);
 }
 
 void tcg_gen_dup8i_vec(TCGContext *s, TCGv_vec r, uint32_t a)
 {
-    do_dupi_vec(s, r, MO_REG, dup_const(MO_8, a));
+    tcg_gen_dupi_vec(s, MO_8, r, a);
 }
 
 void tcg_gen_dupi_vec(TCGContext *s, unsigned vece, TCGv_vec r, uint64_t a)
 {
-    if (vece == MO_64) {
-        tcg_gen_dup64i_vec(s, r, a);
-    } else {
-        do_dupi_vec(s, r, MO_REG, dup_const(vece, a));
-    }
+    TCGTemp *rt = tcgv_vec_temp(s, r);
+    tcg_gen_mov_vec(s, r, tcg_constant_vec(s, rt->base_type, vece, a));
 }
 
 void tcg_gen_dup_i64_vec(TCGContext *s, unsigned vece, TCGv_vec r, TCGv_i64 a)
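
After this hunk, the size-specific dupNi wrappers are thin forwards, and tcg_gen_dupi_vec() simply moves an interned constant vector into the destination; tcg_constant_vec() canonicalises the value with dup_const() before interning it, so equivalent splats share one constant. A hedged usage fragment, assuming it runs inside a translator that has a TCGContext *s and a vector temp r as in the signatures above:

/* Illustrative fragment only: both calls request the same all-ones byte
 * splat, so they resolve to the same TCG_CONST temporary. */
static void demo_dup_splat(TCGContext *s, TCGv_vec r)
{
    tcg_gen_dup8i_vec(s, r, 0xff);         /* splat byte 0xff */
    tcg_gen_dupi_vec(s, MO_8, r, 0xff);    /* same effect via the generic entry */
}
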
@@ -491,8 +472,8 @@ void tcg_gen_abs_vec(TCGContext *s, unsigned vece, TCGv_vec r, TCGv_vec a)
         if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
             tcg_gen_sari_vec(s, vece, t, a, (8 << vece) - 1);
         } else {
-            do_dupi_vec(s, t, MO_REG, 0);
-            tcg_gen_cmp_vec(s, TCG_COND_LT, vece, t, a, t);
+            tcg_gen_cmp_vec(s, TCG_COND_LT, vece, t, a,
+                            tcg_constant_vec(s, type, vece, 0));
         }
         tcg_gen_xor_vec(s, vece, r, a, t);
         tcg_gen_sub_vec(s, vece, r, r, t);
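
The abs_vec fallback here is the classic sign-mask trick: the compare fills t with all-ones where a is negative and zeros elsewhere, and (a ^ t) - t then negates exactly the negative lanes. A scalar sketch of the per-element identity (illustrative, with the usual two's-complement caveat at INT64_MIN):

#include <stdint.h>

/* Per-element identity behind the expansion above: t == -1 selects
 * two's-complement negation (~a + 1), t == 0 leaves a unchanged. */
static int64_t abs_sketch(int64_t a)
{
    int64_t t = (a < 0) ? -1 : 0;   /* what the LT-against-zero compare yields */
    return (a ^ t) - t;
}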

@@ -2879,16 +2879,11 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
              * The targets will, in general, have to do this search anyway,
              * do this generically.
              */
-            if (TCG_TARGET_REG_BITS == 32) {
-                val = dup_const(MO_32, val);
-                vece = MO_32;
-            }
             if (val == dup_const(MO_8, val)) {
                 vece = MO_8;
             } else if (val == dup_const(MO_16, val)) {
                 vece = MO_16;
-            } else if (TCG_TARGET_REG_BITS == 64 &&
-                       val == dup_const(MO_32, val)) {
+            } else if (val == dup_const(MO_32, val)) {
                 vece = MO_32;
             }
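
Finally, with val held as a full uint64_t, temp_load() no longer needs the 32-bit-host pre-fold and simply searches for the narrowest element size that reproduces the constant. A standalone sketch of that search, reusing dup_const_sketch() from the earlier example (names are illustrative):

/* Pick the smallest element size whose duplication yields 'val',
 * mirroring the search in temp_load() above. */
static unsigned minimal_vece_sketch(uint64_t val)
{
    unsigned vece = MO_64;

    if (val == dup_const_sketch(MO_8, val)) {
        vece = MO_8;
    } else if (val == dup_const_sketch(MO_16, val)) {
        vece = MO_16;
    } else if (val == dup_const_sketch(MO_32, val)) {
        vece = MO_32;
    }
    return vece;
}

/* e.g. minimal_vece_sketch(0xffffffffffffffffull) == MO_8,
 *      minimal_vece_sketch(0x0001000100010001ull) == MO_16 */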