tcg: Move some opcode generation functions out of line

Backports commit 951c6300f74ace35d87c079affc57cfc513a6a35 from qemu
Lioncash 2018-02-08 15:19:28 -05:00
parent cb7b19ad26
commit 203b2107d6
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
4 changed files with 2338 additions and 2278 deletions
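For context, the pattern being applied: TCG opcode-generation helpers that were defined static inline in a header become plain prototypes there, and their bodies move into the new qemu/tcg/tcg-op.c as ordinary out-of-line definitions. A minimal sketch of that split, using made-up names (tcg_gen_example_op_i32 and INDEX_op_example are not from this diff):

/* Before: defined "static inline" in the header, so every translation unit
 * that includes it carries its own copy of the body. */
static inline void tcg_gen_example_op_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg)
{
    tcg_gen_op2_i32(s, INDEX_op_example, ret, arg);   /* hypothetical opcode */
}

/* After: the header keeps only the prototype ... */
void tcg_gen_example_op_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg);

/* ... and tcg-op.c holds the single out-of-line definition. */
void tcg_gen_example_op_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg)
{
    tcg_gen_op2_i32(s, INDEX_op_example, ret, arg);
}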

qemu/tcg/tcg-op.c (new file, 2011 lines added): file diff suppressed because it is too large.

Diff for a second file also suppressed because it is too large.

qemu/tcg/tcg.c

@@ -853,176 +853,6 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
#endif /* TCG_TARGET_EXTEND_ARGS */
}

#if TCG_TARGET_REG_BITS == 32
void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1,
                        int c, int right, int arith)
{
    if (c == 0) {
        tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(s, TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32(s);
        t1 = tcg_temp_new_i32(s);
        if (right) {
            tcg_gen_shli_i32(s, t0, TCGV_HIGH(arg1), 32 - c);
            if (arith)
                tcg_gen_sari_i32(s, t1, TCGV_HIGH(arg1), c);
            else
                tcg_gen_shri_i32(s, t1, TCGV_HIGH(arg1), c);
            tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(s, TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(s, t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(s, t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(s, TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(s, t0);
        tcg_temp_free_i32(s, t1);
    }
}
#endif

static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (!is64) {
            tcg_abort();
        }
        break;
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

// Unicorn engine
// check if the last memory access was invalid
// if so, we jump to the block epilogue to quit immediately.
void check_exit_request(TCGContext *tcg_ctx)
{
    TCGv_i32 flag;

    flag = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env,
                   offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
    tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label);
    tcg_temp_free_i32(tcg_ctx, flag);
}

void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;
    memop = tcg_canonicalize_memop(memop, 0, 0);
    *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
    tcg_add_param_i32(tcg_ctx, val);
    tcg_add_param_tl(tcg_ctx, addr);
    *tcg_ctx->gen_opparam_ptr++ = memop;
    *tcg_ctx->gen_opparam_ptr++ = idx;
    check_exit_request(tcg_ctx);
}

void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;
    memop = tcg_canonicalize_memop(memop, 0, 1);
    *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i32;
    tcg_add_param_i32(tcg_ctx, val);
    tcg_add_param_tl(tcg_ctx, addr);
    *tcg_ctx->gen_opparam_ptr++ = memop;
    *tcg_ctx->gen_opparam_ptr++ = idx;
    check_exit_request(tcg_ctx);
}

void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;
    memop = tcg_canonicalize_memop(memop, 1, 0);

#if TCG_TARGET_REG_BITS == 32
    if ((memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32(uc, TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(val), 0);
        }
        check_exit_request(tcg_ctx);
        return;
    }
#endif

    *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
    tcg_add_param_i64(tcg_ctx, val);
    tcg_add_param_tl(tcg_ctx, addr);
    *tcg_ctx->gen_opparam_ptr++ = memop;
    *tcg_ctx->gen_opparam_ptr++ = idx;
    check_exit_request(tcg_ctx);
}

void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGContext *tcg_ctx = uc->tcg_ctx;
    memop = tcg_canonicalize_memop(memop, 1, 1);

#if TCG_TARGET_REG_BITS == 32
    if ((memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32(uc, TCGV_LOW(val), addr, idx, memop);
        check_exit_request(tcg_ctx);
        return;
    }
#endif

    *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i64;
    tcg_add_param_i64(tcg_ctx, val);
    tcg_add_param_tl(tcg_ctx, addr);
    *tcg_ctx->gen_opparam_ptr++ = memop;
    *tcg_ctx->gen_opparam_ptr++ = idx;
    check_exit_request(tcg_ctx);
}

static void tcg_reg_alloc_start(TCGContext *s)
{
    int i;

qemu/tcg/tcg.h

@@ -830,9 +830,6 @@ void tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs);
void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);
void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1,
                        int c, int right, int arith);
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
                     TCGOpDef *tcg_op_def);
@@ -1025,8 +1022,6 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
void check_exit_request(TCGContext *tcg_ctx);
#endif /* CONFIG_SOFTMMU */
#endif /* TCG_H */
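
Usage note: in this fork the load/store generators take the uc_struct handle rather than a bare TCGContext. A hypothetical frontend snippet (uc, addr, and mem_idx are assumed to already exist; they are not defined in this diff) emitting a 32-bit target-endian load would look like:

/* Hypothetical caller, not part of this commit. */
TCGContext *tcg_ctx = uc->tcg_ctx;
TCGv_i32 val = tcg_temp_new_i32(tcg_ctx);

/* Emits INDEX_op_qemu_ld_i32 plus the exit-request branch appended by
 * check_exit_request(), as seen in tcg_gen_qemu_ld_i32() above. */
tcg_gen_qemu_ld_i32(uc, val, addr, mem_idx, MO_TEUL);

/* ... use 'val' in further generated ops, then release the temporary ... */
tcg_temp_free_i32(tcg_ctx, val);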