diff --git a/qemu/tcg/aarch64/tcg-target.inc.c b/qemu/tcg/aarch64/tcg-target.inc.c
index 672c6a6c..046c3bb0 100644
--- a/qemu/tcg/aarch64/tcg-target.inc.c
+++ b/qemu/tcg/aarch64/tcg-target.inc.c
@@ -1392,14 +1392,15 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
     tcg_out_insn(s, 3406, ADR, rd, offset);
 }
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;
 
-    bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -1413,16 +1414,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }
 
     tcg_out_goto(s, lb->raddr);
+    return true;
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;
 
-    bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -1431,6 +1434,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_adr(s, TCG_REG_X4, lb->raddr);
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
     tcg_out_goto(s, lb->raddr);
+    return true;
 }
 
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
diff --git a/qemu/tcg/arm/tcg-target.inc.c b/qemu/tcg/arm/tcg-target.inc.c
index a48648a8..d9bf55c3 100644
--- a/qemu/tcg/arm/tcg-target.inc.c
+++ b/qemu/tcg/arm/tcg-target.inc.c
@@ -1373,15 +1373,16 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = label_ptr;
 }
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     void *func;
 
-    bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1433,16 +1434,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }
 
     tcg_out_goto(s, COND_AL, lb->raddr);
+    return true;
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
 
-    bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -1475,6 +1478,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
     /* Tail-call to the helper, which will return to the fast path. */
     tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    return true;
 }
 #endif /* SOFTMMU */
 
diff --git a/qemu/tcg/i386/tcg-target.inc.c b/qemu/tcg/i386/tcg-target.inc.c
index 0824060c..8b38cdfc 100644
--- a/qemu/tcg/i386/tcg-target.inc.c
+++ b/qemu/tcg/i386/tcg-target.inc.c
@@ -1740,7 +1740,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
 /*
  * Generate code for the slow path for a load at the end of block
  */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -1819,12 +1819,13 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* Jump to the code corresponding to next IR of qemu_st */
     tcg_out_jmp(s, l->raddr);
+    return true;
 }
 
 /*
  * Generate code for the slow path for a store at the end of block
 */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -1887,6 +1888,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     /* "Tail call" to the helper, with the return address back inline. */
     tcg_out_push(s, retaddr);
     tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    return true;
 }
 #elif defined(__x86_64__) && defined(__linux__)
 # include
diff --git a/qemu/tcg/mips/tcg-target.inc.c b/qemu/tcg/mips/tcg-target.inc.c
index e73721aa..2efac702 100644
--- a/qemu/tcg/mips/tcg-target.inc.c
+++ b/qemu/tcg/mips/tcg-target.inc.c
@@ -1262,7 +1262,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
     }
 }
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -1308,9 +1308,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     } else {
         tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
     }
+    return true;
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -1358,6 +1359,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
     /* delay slot */
     tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+    return true;
 }
 #endif
 
diff --git a/qemu/tcg/ppc/tcg-target.inc.c b/qemu/tcg/ppc/tcg-target.inc.c
index 32c8173a..37b24f65 100644
--- a/qemu/tcg/ppc/tcg-target.inc.c
+++ b/qemu/tcg/ppc/tcg-target.inc.c
@@ -1642,13 +1642,15 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = lptr;
 }
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGReg hi, lo, arg = TCG_REG_R3;
 
-    **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
 
@@ -1684,16 +1686,19 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }
 
     tcg_out_b(s, 0, lb->raddr);
+    return true;
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     TCGReg hi, lo, arg = TCG_REG_R3;
 
-    **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
 
@@ -1742,6 +1747,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
 
     tcg_out_b(s, 0, lb->raddr);
+    return true;
 }
 
 #endif /* SOFTMMU */
diff --git a/qemu/tcg/s390/tcg-target.inc.c b/qemu/tcg/s390/tcg-target.inc.c
index 2fbc310b..547aac5f 100644
--- a/qemu/tcg/s390/tcg-target.inc.c
+++ b/qemu/tcg/s390/tcg-target.inc.c
@@ -1622,16 +1622,17 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = label_ptr;
 }
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
 
-    bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
-                          (intptr_t)s->code_ptr, 2);
-    tcg_debug_assert(ok);
+    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+                     (intptr_t)s->code_ptr, 2)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1643,18 +1644,20 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
 
     tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+    return true;
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
 
-    bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
-                          (intptr_t)s->code_ptr, 2);
-    tcg_debug_assert(ok);
+    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+                     (intptr_t)s->code_ptr, 2)) {
+        return false;
+    }
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1681,6 +1684,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
 
     tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+    return true;
 }
 #else
 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
diff --git a/qemu/tcg/tcg-ldst.inc.c b/qemu/tcg/tcg-ldst.inc.c
index 85cfab8f..f4f78146 100644
--- a/qemu/tcg/tcg-ldst.inc.c
+++ b/qemu/tcg/tcg-ldst.inc.c
@@ -37,19 +37,19 @@ typedef struct TCGLabelQemuLdst {
  * Generate TB finalization at the end of block
 */
 
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 
-static bool tcg_out_ldst_finalize(TCGContext *s)
+static int tcg_out_ldst_finalize(TCGContext *s)
 {
     TCGLabelQemuLdst *lb;
 
     /* qemu_ld/st slow paths */
     QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
-        if (lb->is_ld) {
-            tcg_out_qemu_ld_slow_path(s, lb);
-        } else {
-            tcg_out_qemu_st_slow_path(s, lb);
+        if (lb->is_ld
+            ? !tcg_out_qemu_ld_slow_path(s, lb)
+            : !tcg_out_qemu_st_slow_path(s, lb)) {
+            return -2;
         }
 
         /* Test for (pending) buffer overflow. The assumption is that any
@@ -57,10 +57,10 @@ static bool tcg_out_ldst_finalize(TCGContext *s)
            the buffer completely. Thus we can test for overflow after
           generating code without having to check during generation. */
         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
-            return false;
+            return -1;
         }
     }
-    return true;
+    return 0;
 }
 
 /*
diff --git a/qemu/tcg/tcg.c b/qemu/tcg/tcg.c
index cad39252..967dcb77 100644
--- a/qemu/tcg/tcg.c
+++ b/qemu/tcg/tcg.c
@@ -119,7 +119,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 #ifdef TCG_TARGET_NEED_LDST_LABELS
-static bool tcg_out_ldst_finalize(TCGContext *s);
+static int tcg_out_ldst_finalize(TCGContext *s);
 #endif
 
 #define TCG_HIGHWATER 1024
@@ -3373,8 +3373,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
 
     /* Generate TB finalization at the end of block */
 #ifdef TCG_TARGET_NEED_LDST_LABELS
-    if (!tcg_out_ldst_finalize(s)) {
-        return -1;
+    i = tcg_out_ldst_finalize(s);
+    if (i < 0) {
+        return i;
     }
 #endif
 #ifdef TCG_TARGET_NEED_POOL_LABELS
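Taken together, the backend slow-path emitters now report relocation failure to their caller, tcg_out_ldst_finalize() folds the two failure modes into a small error code (0 on success, -1 when the code buffer's high-water mark is exceeded, -2 when a branch to a slow path no longer fits its relocation), and tcg_gen_code() passes any negative value straight back to its caller so the partial TB can be discarded and regenerated (the retry itself is outside this patch). The snippet below is only a self-contained sketch of that convention, not code from this tree; the helper name, the message strings, and the sample array are invented for illustration.

#include <stdio.h>

/*
 * Sketch only: models the return codes introduced by this patch.
 * ldst_finalize_strerror() does not exist in the QEMU/TCG sources.
 */
static const char *ldst_finalize_strerror(int r)
{
    switch (r) {
    case 0:
        return "ok: all qemu_ld/st slow paths emitted";
    case -1:
        return "code buffer high-water mark exceeded";
    case -2:
        return "slow-path branch relocation out of range";
    default:
        return "unexpected value";
    }
}

int main(void)
{
    /* The three values tcg_out_ldst_finalize() can now produce. */
    int codes[] = { 0, -1, -2 };
    int i;

    for (i = 0; i < 3; i++) {
        printf("%2d: %s\n", codes[i], ldst_finalize_strerror(codes[i]));
    }
    return 0;
}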