tcg: Dynamically allocate TCGOps
With no fixed array allocation, we can't overflow a buffer. This will be important as optimizations related to host vectors may expand the number of ops used.

Use QTAILQ to link the ops together.

Backports commit 15fa08f8451babc88d733bd411d4c94976f9d0f8 from qemu
parent 5f074f09ab
commit 7fe5f620df
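Before the per-file changes below, here is a rough sketch of the shape of the new scheme in plain C: ops live in a tail queue instead of a fixed gen_op_buf[] array, and removed ops are parked on a free list for reuse. This is a minimal illustration only, using the standard <sys/queue.h> TAILQ macros in place of QEMU's QTAILQ wrappers; the Ctx, Op, op_alloc, emit_op, and op_remove names are made up for the example and are not the actual unicorn/QEMU identifiers.

/*
 * Minimal sketch of the dynamically-allocated op list (illustration only,
 * not the actual QEMU/unicorn code).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

typedef struct Op {
    int opc;                    /* opcode number */
    TAILQ_ENTRY(Op) link;       /* replaces the old 16-bit prev/next indices */
} Op;

typedef struct Ctx {
    TAILQ_HEAD(OpHead, Op) ops;        /* ops emitted so far, in order */
    TAILQ_HEAD(FreeHead, Op) free_ops; /* removed ops, kept for reuse */
} Ctx;

static Op *op_alloc(Ctx *s, int opc)
{
    Op *op;

    if (TAILQ_EMPTY(&s->free_ops)) {
        op = calloc(1, sizeof(*op));        /* no fixed-size buffer to overflow */
    } else {
        op = TAILQ_FIRST(&s->free_ops);     /* recycle a previously removed op */
        TAILQ_REMOVE(&s->free_ops, op, link);
        memset(op, 0, sizeof(*op));
    }
    op->opc = opc;
    return op;
}

static Op *emit_op(Ctx *s, int opc)
{
    Op *op = op_alloc(s, opc);
    TAILQ_INSERT_TAIL(&s->ops, op, link);   /* append to the instruction stream */
    return op;
}

static void op_remove(Ctx *s, Op *op)
{
    TAILQ_REMOVE(&s->ops, op, link);           /* unlink from the stream... */
    TAILQ_INSERT_TAIL(&s->free_ops, op, link); /* ...and park it for reuse */
}

int main(void)
{
    Ctx s;
    Op *op, *second;

    TAILQ_INIT(&s.ops);
    TAILQ_INIT(&s.free_ops);

    emit_op(&s, 1);
    second = emit_op(&s, 2);
    emit_op(&s, 3);
    op_remove(&s, second);                  /* op 2 moves to the free list */

    TAILQ_FOREACH(op, &s.ops, link) {       /* prints 1 then 3 */
        printf("opc %d\n", op->opc);
    }
    return 0;
}

The real patch follows the same pattern: tcg_op_remove() no longer patches prev/next indices inside gen_op_buf, it just moves the op onto free_ops so tcg_op_alloc() can hand it out again.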
@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_aarch64
#define tcg_dump_info tcg_dump_info_aarch64
#define tcg_dump_ops tcg_dump_ops_aarch64
#define tcg_emit_op tcg_emit_op_aarch64
#define tcg_enabled tcg_enabled_aarch64
#define tcg_exec_all tcg_exec_all_aarch64
#define tcg_exec_init tcg_exec_init_aarch64

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_aarch64eb
#define tcg_dump_info tcg_dump_info_aarch64eb
#define tcg_dump_ops tcg_dump_ops_aarch64eb
#define tcg_emit_op tcg_emit_op_aarch64eb
#define tcg_enabled tcg_enabled_aarch64eb
#define tcg_exec_all tcg_exec_all_aarch64eb
#define tcg_exec_init tcg_exec_init_aarch64eb

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_arm
#define tcg_dump_info tcg_dump_info_arm
#define tcg_dump_ops tcg_dump_ops_arm
#define tcg_emit_op tcg_emit_op_arm
#define tcg_enabled tcg_enabled_arm
#define tcg_exec_all tcg_exec_all_arm
#define tcg_exec_init tcg_exec_init_arm

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_armeb
#define tcg_dump_info tcg_dump_info_armeb
#define tcg_dump_ops tcg_dump_ops_armeb
#define tcg_emit_op tcg_emit_op_armeb
#define tcg_enabled tcg_enabled_armeb
#define tcg_exec_all tcg_exec_all_armeb
#define tcg_exec_init tcg_exec_init_armeb
@@ -2983,6 +2983,7 @@ symbols = (
'tcg_current_code_size',
'tcg_dump_info',
'tcg_dump_ops',
'tcg_emit_op',
'tcg_enabled',
'tcg_exec_all',
'tcg_exec_init',
@@ -5,7 +5,7 @@
/* Helpers for instruction counting code generation. */

//static int icount_start_insn_idx
//static TCGOp *icount_start_insn;
//static TCGLabel *icount_label;
//static TCGLabel *exitreq_label;

@@ -34,7 +34,7 @@ static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
/* We emit a movi with a dummy immediate argument. Keep the insn index
* of the movi so that we later (when we know the actual insn count)
* can update the immediate argument with the actual insn count. */
icount_start_insn_idx = tcg_op_buf_count(tcg_ctx);
icount_start_insn = tcg_last_op(tcg_ctx);
tcg_gen_movi_i32(tcg_ctx, imm, 0xdeadbeef);

tcg_gen_sub_i32(tcg_ctx, count, count, imm);
@@ -56,15 +56,11 @@ static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num
if (tb->cflags & CF_USE_ICOUNT) {
/* Update the num_insn immediate parameter now that we know
* the actual insn count. */
tcg_set_insn_param(tcg_ctx, icount_start_insn_idx, 1, num_insns);
tcg_set_insn_param(tcg_ctx->icount_start_insn, 1, num_insns);
gen_set_label(tcg_ctx, icount_label);
tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
#endif

/* Terminate the linked list. */
tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].next = 0;

}

#if 0
@@ -399,6 +399,11 @@ struct { \
(var); \
(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev_var) \
for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
(var) && ((prev_var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)), 1); \
(var) = (prev_var))

/*
* Tail queue access methods.
*/
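A quick way to see why the _SAFE variant matters: the previous element is latched before the loop body runs, so the body may remove (and even free) the current element without breaking the reverse walk. The demo below is only a sketch: it relies on BSD-style <sys/queue.h> tail queues (which use the same tqh_last/tqe_prev fields as QTAILQ), copies the macro body from the hunk above under a local name, and invents Node/NodeHead purely for the example.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Same body as the QTAILQ_FOREACH_REVERSE_SAFE added above, under a local
 * name so the example does not depend on QEMU headers. */
#define DEMO_FOREACH_REVERSE_SAFE(var, head, headname, field, prev_var) \
    for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
         (var) && ((prev_var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)), 1); \
         (var) = (prev_var))

typedef struct Node {
    int value;
    TAILQ_ENTRY(Node) link;
} Node;

TAILQ_HEAD(NodeHead, Node);

int main(void)
{
    struct NodeHead head;
    Node *n, *prev;

    TAILQ_INIT(&head);
    for (int i = 1; i <= 4; i++) {
        n = calloc(1, sizeof(*n));
        n->value = i;
        TAILQ_INSERT_TAIL(&head, n, link);
    }

    /* Visits 4, 3, 2, 1; removing the current node is fine because the
     * previous one was captured before the body ran. */
    DEMO_FOREACH_REVERSE_SAFE(n, &head, NodeHead, link, prev) {
        printf("visiting %d\n", n->value);
        if (n->value % 2 == 0) {
            TAILQ_REMOVE(&head, n, link);
            free(n);
        }
    }
    return 0;
}

This is the pattern liveness_pass_1 uses further down, where ops may be deleted while walking the list backwards.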
@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_m68k
#define tcg_dump_info tcg_dump_info_m68k
#define tcg_dump_ops tcg_dump_ops_m68k
#define tcg_emit_op tcg_emit_op_m68k
#define tcg_enabled tcg_enabled_m68k
#define tcg_exec_all tcg_exec_all_m68k
#define tcg_exec_init tcg_exec_init_m68k

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_mips
#define tcg_dump_info tcg_dump_info_mips
#define tcg_dump_ops tcg_dump_ops_mips
#define tcg_emit_op tcg_emit_op_mips
#define tcg_enabled tcg_enabled_mips
#define tcg_exec_all tcg_exec_all_mips
#define tcg_exec_init tcg_exec_init_mips

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_mips64
#define tcg_dump_info tcg_dump_info_mips64
#define tcg_dump_ops tcg_dump_ops_mips64
#define tcg_emit_op tcg_emit_op_mips64
#define tcg_enabled tcg_enabled_mips64
#define tcg_exec_all tcg_exec_all_mips64
#define tcg_exec_init tcg_exec_init_mips64

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_mips64el
#define tcg_dump_info tcg_dump_info_mips64el
#define tcg_dump_ops tcg_dump_ops_mips64el
#define tcg_emit_op tcg_emit_op_mips64el
#define tcg_enabled tcg_enabled_mips64el
#define tcg_exec_all tcg_exec_all_mips64el
#define tcg_exec_init tcg_exec_init_mips64el

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_mipsel
#define tcg_dump_info tcg_dump_info_mipsel
#define tcg_dump_ops tcg_dump_ops_mipsel
#define tcg_emit_op tcg_emit_op_mipsel
#define tcg_enabled tcg_enabled_mipsel
#define tcg_exec_all tcg_exec_all_mipsel
#define tcg_exec_init tcg_exec_init_mipsel

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_powerpc
#define tcg_dump_info tcg_dump_info_powerpc
#define tcg_dump_ops tcg_dump_ops_powerpc
#define tcg_emit_op tcg_emit_op_powerpc
#define tcg_enabled tcg_enabled_powerpc
#define tcg_exec_all tcg_exec_all_powerpc
#define tcg_exec_init tcg_exec_init_powerpc

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_sparc
#define tcg_dump_info tcg_dump_info_sparc
#define tcg_dump_ops tcg_dump_ops_sparc
#define tcg_emit_op tcg_emit_op_sparc
#define tcg_enabled tcg_enabled_sparc
#define tcg_exec_all tcg_exec_all_sparc
#define tcg_exec_init tcg_exec_init_sparc

@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_sparc64
#define tcg_dump_info tcg_dump_info_sparc64
#define tcg_dump_ops tcg_dump_ops_sparc64
#define tcg_emit_op tcg_emit_op_sparc64
#define tcg_enabled tcg_enabled_sparc64
#define tcg_exec_all tcg_exec_all_sparc64
#define tcg_exec_init tcg_exec_init_sparc64
@@ -11518,8 +11518,8 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *dc = container_of(dcbase, DisasContext, base);
TCGContext *tcg_ctx = cpu->uc->tcg_ctx;

dc->insn_start_idx = tcg_op_buf_count(tcg_ctx);
tcg_gen_insn_start(tcg_ctx, dc->pc, 0, 0);
dc->insn_start = tcg_last_op(tcg_ctx);
}

static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
@@ -12312,10 +12312,10 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *dc = container_of(dcbase, DisasContext, base);
TCGContext *tcg_ctx = cpu->uc->tcg_ctx;

dc->insn_start_idx = tcg_op_buf_count(tcg_ctx);
tcg_gen_insn_start(tcg_ctx, dc->pc,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
dc->insn_start = tcg_last_op(tcg_ctx);
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
@@ -65,8 +65,8 @@ typedef struct DisasContext {
bool ss_same_el;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
/* TCG op index of the current insn_start. */
int insn_start_idx;
/* TCG op of the current insn_start. */
TCGOp *insn_start;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
@@ -107,8 +107,6 @@ static inline int default_exception_el(DisasContext *s)

static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;

/* We don't need to save all of the syndrome so we mask and shift
* out unneeded bits to help the sleb128 encoder do a better job.
*/
@@ -116,9 +114,9 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
syn >>= ARM_INSN_START_WORD2_SHIFT;

/* We check and clear insn_start_idx to catch multiple updates. */
assert(s->insn_start_idx != 0);
tcg_set_insn_param(tcg_ctx, s->insn_start_idx, 2, syn);
s->insn_start_idx = 0;
assert(s->insn_start != NULL);
tcg_set_insn_param(s->insn_start, 2, syn);
s->insn_start = NULL;
}

/* target-specific extra values for is_jmp */
@@ -604,8 +604,8 @@ static bool swap_commutative2(TCGContext *s, TCGArg *p1, TCGArg *p2)
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
int oi, oi_next, nb_temps, nb_globals;
TCGOp *prev_mb = NULL;
int nb_temps, nb_globals;
TCGOp *op, *op_next, *prev_mb = NULL;

/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
@@ -616,17 +616,14 @@ void tcg_optimize(TCGContext *s)
nb_globals = s->nb_globals;
reset_all_temps(s, nb_temps);

for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
tcg_target_ulong mask, partmask, affected;
int nb_oargs, nb_iargs, i;
TCGArg tmp;

TCGOp * const op = &s->gen_op_buf[oi];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &s->tcg_op_defs[opc];

oi_next = op->next;

/* Count the arguments, and initialize the temps that are
going to be used */
if (opc == INDEX_op_call) {
@@ -1260,9 +1257,6 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));

/* We've done all we need to do with the movi. Skip it. */
oi_next = op2->next;
break;
}
goto do_default;
@@ -1279,9 +1273,6 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));

/* We've done all we need to do with the movi. Skip it. */
oi_next = op2->next;
break;
}
goto do_default;
@@ -40,29 +40,6 @@ extern TCGv_i32 TCGV_HIGH_link_error(TCGContext *, TCGv_i64);
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/* Note that this is optimized for sequential allocation during translate.
Up to and including filling in the forward link immediately. We'll do
proper termination of the end of the list after we finish translation. */

static inline TCGOp *tcg_emit_op(TCGContext *ctx, TCGOpcode opc)
{
int oi = ctx->gen_next_op_idx;
int ni = oi + 1;
int pi = oi - 1;
TCGOp *op = &ctx->gen_op_buf[oi];

tcg_debug_assert(oi < OPC_BUF_SIZE);
ctx->gen_op_buf[0].prev = oi;
ctx->gen_next_op_idx = ni;

memset(op, 0, offsetof(TCGOp, args));
op->opc = opc;
op->prev = pi;
op->next = ni;

return op;
}

void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
{
TCGOp *op = tcg_emit_op(ctx, opc);
qemu/tcg/tcg.c
@@ -500,9 +500,8 @@ void tcg_func_start(TCGContext *s)
s->goto_tb_issue_mask = 0;
#endif

s->gen_op_buf[0].next = 1;
s->gen_op_buf[0].prev = 0;
s->gen_next_op_idx = 1;
QTAILQ_INIT(&s->ops);
QTAILQ_INIT(&s->free_ops);
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
@@ -1050,17 +1049,7 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGTemp *ret, int nargs, TCGTemp *
}
#endif /* TCG_TARGET_EXTEND_ARGS */

i = s->gen_next_op_idx;
tcg_debug_assert(i < OPC_BUF_SIZE);
s->gen_op_buf[0].prev = i;
s->gen_next_op_idx = i + 1;
op = &s->gen_op_buf[i];

/* Set links for sequential allocation during translation. */
memset(op, 0, offsetof(TCGOp, args));
op->opc = INDEX_op_call;
op->prev = i - 1;
op->next = i + 1;
op = tcg_emit_op(s, INDEX_op_call);

pi = 0;
if (ret != NULL) {
@@ -1331,20 +1320,18 @@ void tcg_dump_ops(TCGContext *s)
{
char buf[128];
TCGOp *op;
int oi;

for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
QTAILQ_FOREACH(op, &s->ops, link) {
int i, k, nb_oargs, nb_iargs, nb_cargs;
const TCGOpDef *def;
TCGOpcode c;
int col = 0;

op = &s->gen_op_buf[oi];
c = op->opc;
def = &s->tcg_op_defs[c];

if (c == INDEX_op_insn_start) {
col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");
col += qemu_log("\n ----");

for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
target_ulong a;
@@ -1617,61 +1604,49 @@ static void process_op_defs(TCGContext *s)

void tcg_op_remove(TCGContext *s, TCGOp *op)
{
int next = op->next;
int prev = op->prev;

/* We should never attempt to remove the list terminator. */
tcg_debug_assert(op != &s->gen_op_buf[0]);

s->gen_op_buf[next].prev = prev;
s->gen_op_buf[prev].next = next;

memset(op, 0, sizeof(*op));
QTAILQ_REMOVE(&s->ops, op, link);
QTAILQ_INSERT_TAIL(&s->free_ops, op, link);

#ifdef CONFIG_PROFILER
s->del_op_count++;
#endif
}

static TCGOp *tcg_op_alloc(TCGContext *s, TCGOpcode opc)
{
TCGOp *op;

if (likely(QTAILQ_EMPTY(&s->free_ops))) {
op = tcg_malloc(s, sizeof(TCGOp));
} else {
op = QTAILQ_FIRST(&s->free_ops);
QTAILQ_REMOVE(&s->free_ops, op, link);
}
memset(op, 0, offsetof(TCGOp, link));
op->opc = opc;
return op;
}

TCGOp *tcg_emit_op(TCGContext *s, TCGOpcode opc)
{
TCGOp *op = tcg_op_alloc(s, opc);
QTAILQ_INSERT_TAIL(&s->ops, op, link);
return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
TCGOpcode opc, int nargs)
{
int oi = s->gen_next_op_idx;
int prev = old_op->prev;
int next = old_op - s->gen_op_buf;
TCGOp *new_op;

tcg_debug_assert(oi < OPC_BUF_SIZE);
s->gen_next_op_idx = oi + 1;

new_op = &s->gen_op_buf[oi];
new_op->opc = opc;
new_op->prev = prev;
new_op->next = next;
s->gen_op_buf[prev].next = oi;
old_op->prev = oi;

TCGOp *new_op = tcg_op_alloc(s, opc);
QTAILQ_INSERT_BEFORE(old_op, new_op, link);
return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
TCGOpcode opc, int nargs)
{
int oi = s->gen_next_op_idx;
int prev = old_op - s->gen_op_buf;
int next = old_op->next;
TCGOp *new_op;

tcg_debug_assert(oi < OPC_BUF_SIZE);
s->gen_next_op_idx = oi + 1;

new_op = &s->gen_op_buf[oi];
new_op->opc = opc;
new_op->prev = prev;
new_op->next = next;
s->gen_op_buf[next].prev = oi;
old_op->next = oi;

TCGOp *new_op = tcg_op_alloc(s, opc);
QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
return new_op;
}
@@ -1738,23 +1713,20 @@ static inline void tcg_la_br_end(TCGContext *s)
static void liveness_pass_1(TCGContext *s)
{
int nb_globals = s->nb_globals;
int oi, oi_prev;
TCGOp *op, *op_prev;

tcg_la_func_end(s);

for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
int i, nb_iargs, nb_oargs;
TCGOpcode opc_new, opc_new2;
bool have_opc_new2;
TCGLifeData arg_life = 0;
TCGTemp *arg_ts;

TCGOp * const op = &s->gen_op_buf[oi];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &s->tcg_op_defs[opc];

oi_prev = op->prev;

switch(opc) {
case INDEX_op_call:
{
@@ -1978,8 +1950,9 @@ static void liveness_pass_1(TCGContext *s)
static bool liveness_pass_2(TCGContext *s)
{
int nb_globals = s->nb_globals;
int nb_temps, i, oi, oi_next;
int nb_temps, i;
bool changes = false;
TCGOp *op, *op_next;

/* Create a temporary for each indirect global. */
for (i = 0; i < nb_globals; ++i) {
@@ -2001,16 +1974,13 @@ static bool liveness_pass_2(TCGContext *s)
its->state = TS_DEAD;
}

for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
TCGOp *op = &s->gen_op_buf[oi];
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
TCGOpcode opc = op->opc;
const TCGOpDef *def = &s->tcg_op_defs[opc];
TCGLifeData arg_life = op->life;
int nb_iargs, nb_oargs, call_flags;
TCGTemp *arg_ts, *dir_ts;

oi_next = op->next;

if (opc == INDEX_op_call) {
nb_oargs = op->callo;
nb_iargs = op->calli;
@@ -2839,22 +2809,25 @@ static void dump_op_count(void)

int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
int i, oi, oi_next, num_insns;
int i, num_insns;
TCGOp *op;

#ifdef CONFIG_PROFILER
{
int n;

n = s->gen_op_buf[0].prev + 1;
s->op_count += n;
QTAILQ_FOREACH(op, &s->ops, link) {
n++;
}
atomic_set(&s->op_count, s->op_count + n);
if (n > s->op_count_max) {
s->op_count_max = n;
atomic_set(&s->op_count_max, n);
}

n = s->nb_temps;
s->temp_count += n;
atomic_set(&s->temp_count, s->temp_count + n);
if (n > s->temp_count_max) {
s->temp_count_max = n;
atomic_set(&s->temp_count_max, n);
}
}
#endif
@@ -2925,11 +2898,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif

num_insns = -1;
for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
TCGOp * const op = &s->gen_op_buf[oi];
QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;

oi_next = op->next;
#ifdef CONFIG_PROFILER
tcg_table_op_count[opc]++;
#endif
@@ -29,6 +29,7 @@
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"
#include "exec/exec-all.h"

@@ -51,8 +52,6 @@
* and up to 4 + N parameters on 64-bit archs
* (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define CPU_TEMP_BUF_NLONGS 128
@@ -575,23 +574,18 @@ typedef struct TCGOp {
unsigned callo : 2; /* 14 */
unsigned : 2; /* 16 */

/* Index of the prev/next op, or 0 for the end of the list. */
unsigned prev : 16; /* 32 */
unsigned next : 16; /* 48 */

/* Lifetime data of the operands. */
unsigned life : 16; /* 64 */
unsigned life : 16; /* 32 */

/* Next and previous opcodes. */
QTAILQ_ENTRY(TCGOp) link;

/* Arguments for the opcode. */
TCGArg args[MAX_OPC_PARAM];
} TCGOp;

/* Make sure that we don't expand the structure without noticing. */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);

/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));

/* pool based memory allocation */

@@ -724,8 +718,6 @@ struct TCGContext {
int goto_tb_issue_mask;
#endif

int gen_next_op_idx;

/* Code generation. Note that we specifically do not use tcg_insn_unit
here, because there's too much arithmetic throughout that relies
on addition and subtraction working on bytes. Rely on the GCC
@@ -757,12 +749,12 @@ struct TCGContext {
TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

QTAILQ_HEAD(TCGOpHead, TCGOp) ops, free_ops;

/* Tells which temporary holds a given register.
It does not take into account fixed registers */
TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

TCGOp gen_op_buf[OPC_BUF_SIZE];

target_ulong gen_opc_pc[OPC_BUF_SIZE];
uint16_t gen_opc_icount[OPC_BUF_SIZE];
uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
@@ -977,21 +969,21 @@ static inline TCGv_i32 TCGV_HIGH(TCGContext *s, TCGv_i64 t)
}
#endif

static inline void tcg_set_insn_param(TCGContext *tcg_ctx, int op_idx, int arg, TCGArg v)
static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
tcg_ctx->gen_op_buf[op_idx].args[arg] = v;
op->args[arg] = v;
}

/* The number of opcodes emitted so far. */
static inline int tcg_op_buf_count(TCGContext *tcg_ctx)
/* The last op that was emitted. */
static inline TCGOp *tcg_last_op(TCGContext *tcg_ctx)
{
return tcg_ctx->gen_next_op_idx;
return QTAILQ_LAST(&tcg_ctx->ops, TCGOpHead);
}

/* Test for whether to terminate the TB for using too many opcodes. */
static inline bool tcg_op_buf_full(TCGContext *tcg_ctx)
{
return tcg_op_buf_count(tcg_ctx) >= OPC_MAX_SIZE;
return false;
}

TCGTemp *tcg_global_mem_new_internal(TCGContext *s, TCGType type, TCGv_ptr base,
@@ -1083,6 +1075,7 @@ bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(TCGContext *s, void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGContext *s, TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
@@ -2977,6 +2977,7 @@
#define tcg_current_code_size tcg_current_code_size_x86_64
#define tcg_dump_info tcg_dump_info_x86_64
#define tcg_dump_ops tcg_dump_ops_x86_64
#define tcg_emit_op tcg_emit_op_x86_64
#define tcg_enabled tcg_enabled_x86_64
#define tcg_exec_all tcg_exec_all_x86_64
#define tcg_exec_init tcg_exec_init_x86_64