target/riscv: Check nanboxed inputs in trans_rvf.inc.c
If a 32-bit input is not properly nanboxed, then the input is replaced with the default qnan. The only inline expansion is for the sign-changing set of instructions: FSGNJ.S, FSGNJX.S, FSGNJN.S.

Backports ffe70e4dfc9cf2a6934e674b81b69c847b403c4b
commit 3af34d3df4
parent ce54dfb4f7
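For orientation, here is a minimal host-side model of the check described above (plain C; the names are hypothetical and not part of the patch): a 32-bit float held in a 64-bit FPR is properly nanboxed only when its upper 32 bits are all ones, and anything else is read back as the nanboxed default qNaN.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model: return the nanboxed value actually consumed by a
 * 32-bit FP operation. Properly boxed inputs pass through; everything
 * else becomes the nanboxed canonical qNaN (0x7fc00000). */
static uint64_t check_nanbox_s_model(uint64_t reg)
{
    /* Upper 32 bits all ones <=> reg >= 0xffffffff00000000 (unsigned). */
    if (reg >= 0xffffffff00000000ull) {
        return reg;
    }
    return 0xffffffff7fc00000ull;
}

int main(void)
{
    /* Boxed 1.0f passes through; a zero-extended 1.0f is rejected. */
    printf("%016llx\n", (unsigned long long)check_nanbox_s_model(0xffffffff3f800000ull));
    printf("%016llx\n", (unsigned long long)check_nanbox_s_model(0x000000003f800000ull));
    return 0;
}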
In trans_rvf.inc.c:

@@ -183,13 +183,21 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
 
     if (a->rs1 == a->rs2) { /* FMOV */
-        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs1]);
+        gen_check_nanbox_s(ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs1]);
     } else { /* FSGNJ */
-        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs2], tcg_ctx->cpu_fpr_risc[a->rs1],
-                            0, 31);
+        TCGv_i64 rs1 = tcg_temp_new_i64(tcg_ctx);
+        TCGv_i64 rs2 = tcg_temp_new_i64(tcg_ctx);
+
+        gen_check_nanbox_s(ctx, rs1, tcg_ctx->cpu_fpr_risc[a->rs1]);
+        gen_check_nanbox_s(ctx, rs2, tcg_ctx->cpu_fpr_risc[a->rs2]);
+
+        /* This formulation retains the nanboxing of rs2. */
+        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], rs2, rs1, 0, 31);
+
+        tcg_temp_free_i64(tcg_ctx, rs1);
+        tcg_temp_free_i64(tcg_ctx, rs2);
     }
-    gen_nanbox_s(ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rd]);
     mark_fs_dirty(ctx);
     return true;
 }
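Why the deposit keeps rd nanboxed: tcg_gen_deposit_i64(rd, rs2, rs1, 0, 31) copies only bits 30..0 (exponent and fraction) from rs1; the sign bit 31 and the nanbox bits 63..32 all come from rs2, which has just passed the check. A plain-C sketch of that bit manipulation (model only, not part of the patch):

#include <stdint.h>

/* Model of tcg_gen_deposit_i64(rd, rs2, rs1, 0, 31) as used for FSGNJ.S:
 * bits 30..0 from rs1; bit 31 (sign) and bits 63..32 (nanbox) from rs2. */
static uint64_t fsgnj_s_model(uint64_t rs1, uint64_t rs2)
{
    uint64_t low31 = (1ull << 31) - 1;   /* mask for bits 30..0 */
    return (rs2 & ~low31) | (rs1 & low31);
}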
@@ -197,18 +205,34 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
 static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
 {
     TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i64 rs1, rs2, mask;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+
+    rs1 = tcg_temp_new_i64(tcg_ctx);
+    gen_check_nanbox_s(ctx, rs1, tcg_ctx->cpu_fpr_risc[a->rs1]);
+
     if (a->rs1 == a->rs2) { /* FNEG */
-        tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs1], INT32_MIN);
+        tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
-        tcg_gen_not_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr_risc[a->rs2]);
-        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], t0, tcg_ctx->cpu_fpr_risc[a->rs1], 0, 31);
-        tcg_temp_free_i64(tcg_ctx, t0);
+        rs2 = tcg_temp_new_i64(tcg_ctx);
+        gen_check_nanbox_s(ctx, rs2, tcg_ctx->cpu_fpr_risc[a->rs2]);
+
+        /*
+         * Replace bit 31 in rs1 with inverse in rs2.
+         * This formulation retains the nanboxing of rs1.
+         */
+        mask = tcg_const_i64(tcg_ctx, ~MAKE_64BIT_MASK(31, 1));
+        tcg_gen_nor_i64(tcg_ctx, rs2, rs2, mask);
+        tcg_gen_and_i64(tcg_ctx, rs1, mask, rs1);
+        tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], rs1, rs2);
+
+        tcg_temp_free_i64(tcg_ctx, mask);
+        tcg_temp_free_i64(tcg_ctx, rs2);
     }
-    gen_nanbox_s(ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rd]);
+    tcg_temp_free_i64(tcg_ctx, rs1);
+
     mark_fs_dirty(ctx);
     return true;
 }
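The FSGNJN.S arithmetic above reads as follows: mask has every bit set except bit 31, so the NOR leaves only the inverted sign of rs2, the AND clears rs1's sign while keeping its nanbox bits, and the OR merges the two. A plain-C model (not part of the patch):

#include <stdint.h>

/* Model of the FSGNJN.S sequence: rs1 with its sign bit replaced by the
 * inverse of rs2's sign bit. rs1's nanbox bits 63..32 survive the AND
 * because mask has them set. */
static uint64_t fsgnjn_s_model(uint64_t rs1, uint64_t rs2)
{
    uint64_t mask = ~(1ull << 31);      /* all bits except the sign bit */
    uint64_t sign = ~(rs2 | mask);      /* NOR: inverted rs2 sign, rest 0 */
    return (rs1 & mask) | sign;
}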
@@ -216,18 +240,31 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
 static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
 {
     TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i64 rs1, rs2;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+
+    rs1 = tcg_temp_new_i64(tcg_ctx);
+    gen_check_nanbox_s(ctx, rs1, tcg_ctx->cpu_fpr_risc[a->rs1]);
+
     if (a->rs1 == a->rs2) { /* FABS */
-        tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs1], ~INT32_MIN);
+        tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
-        tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr_risc[a->rs2], INT32_MIN);
-        tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rs1], t0);
-        tcg_temp_free_i64(tcg_ctx, t0);
+        rs2 = tcg_temp_new_i64(tcg_ctx);
+        gen_check_nanbox_s(ctx, rs2, tcg_ctx->cpu_fpr_risc[a->rs2]);
+
+        /*
+         * Xor bit 31 in rs1 with that in rs2.
+         * This formulation retains the nanboxing of rs1.
+         */
+        tcg_gen_andi_i64(tcg_ctx, rs2, rs2, MAKE_64BIT_MASK(31, 1));
+        tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr_risc[a->rd], rs1, rs2);
+
+        tcg_temp_free_i64(tcg_ctx, rs2);
     }
-    gen_nanbox_s(ctx, tcg_ctx->cpu_fpr_risc[a->rd], tcg_ctx->cpu_fpr_risc[a->rd]);
+    tcg_temp_free_i64(tcg_ctx, rs1);
+
     mark_fs_dirty(ctx);
     return true;
 }
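FSGNJX.S is simpler: only rs2's sign bit survives the AND, and the XOR flips rs1's sign exactly when that bit is set, leaving rs1's nanbox bits untouched. A plain-C model (not part of the patch):

#include <stdint.h>

/* Model of the FSGNJX.S sequence: flip rs1's sign iff rs2's sign is set.
 * The xor operand is zero in bits 63..32, so rs1 stays nanboxed. */
static uint64_t fsgnjx_s_model(uint64_t rs1, uint64_t rs2)
{
    return rs1 ^ (rs2 & (1ull << 31));
}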
In translate.c, next to gen_nanbox_s:

@@ -105,6 +105,25 @@ static void gen_nanbox_s(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
     tcg_gen_ori_i64(tcg_ctx, out, in, MAKE_64BIT_MASK(32, 32));
 }
 
+/*
+ * A narrow n-bit operation, where n < FLEN, checks that input operands
+ * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
+ * If so, the least-significant bits of the input are used, otherwise the
+ * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
+ *
+ * Here, the result is always nan-boxed, even the canonical nan.
+ */
+static void gen_check_nanbox_s(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i64 t_max = tcg_const_i64(tcg_ctx, 0xffffffff00000000ull);
+    TCGv_i64 t_nan = tcg_const_i64(tcg_ctx, 0xffffffff7fc00000ull);
+
+    tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GEU, out, in, t_max, in, t_nan);
+    tcg_temp_free_i64(tcg_ctx, t_max);
+    tcg_temp_free_i64(tcg_ctx, t_nan);
+}
+
 static void generate_exception(DisasContext *ctx, int excp)
 {
     TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
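The single TCG_COND_GEU comparison in gen_check_nanbox_s is exact: an unsigned 64-bit value is >= 0xffffffff00000000 precisely when its upper 32 bits are all ones. A small standalone self-check of that equivalence (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t samples[] = {
        0xffffffff00000000ull,  /* smallest properly boxed value */
        0xffffffffffffffffull,  /* largest */
        0xffffffff3f800000ull,  /* boxed 1.0f */
        0xfffffffe00000000ull,  /* one nanbox bit clear */
        0x000000003f800000ull,  /* zero-extended, not boxed */
        0x0000000000000000ull,
    };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint64_t in = samples[i];
        int by_compare = in >= 0xffffffff00000000ull;   /* TCG_COND_GEU */
        int by_bits = (in >> 32) == 0xffffffffull;      /* direct bit test */
        assert(by_compare == by_bits);
    }
    return 0;
}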