mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2025-07-03 10:48:12 +00:00
tcg: Update all other target source files to be in sync with qemu
This commit is contained in:
parent 2e5cfbcfbf
commit 83398bf99c
@@ -144,5 +144,6 @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 #ifdef CONFIG_SOFTMMU
 #define TCG_TARGET_NEED_LDST_LABELS
 #endif
+#define TCG_TARGET_NEED_POOL_LABELS
 
 #endif
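For context, TCG_TARGET_NEED_LDST_LABELS and the newly added TCG_TARGET_NEED_POOL_LABELS act as feature gates: common TCG code compiles in the shared slow-path-label and constant-pool helpers only when the backend defines them. A minimal sketch of the gating pattern, assuming the helper file names used by upstream qemu of this period:

    /* Sketch, assuming upstream qemu's optional-helper pattern: the
     * backend's #defines above switch these shared includes on. */
    #ifdef TCG_TARGET_NEED_LDST_LABELS
    #include "tcg-ldst.inc.c"   /* slow-path load/store label bookkeeping */
    #endif
    #ifdef TCG_TARGET_NEED_POOL_LABELS
    #include "tcg-pool.inc.c"   /* constant-pool emission and patching */
    #endif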
@@ -2026,6 +2026,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                         ARITH_MOV, args[0], 0, 0);
         break;
+
     case INDEX_op_brcond2_i32:
         c = tcg_out_cmp2(s, args, const_args);
         tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
@@ -2125,6 +2126,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;
+
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
@@ -2143,6 +2145,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extract_i32:
     case INDEX_op_sextract_i32:
         return &r_r;
+
     case INDEX_op_add_i32:
     case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:
@@ -2168,6 +2171,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
         return &r_r_ri;
+
     case INDEX_op_brcond_i32:
         return &br;
     case INDEX_op_deposit_i32:
@@ -2182,6 +2186,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &br2;
     case INDEX_op_setcond2_i32:
         return &setc2;
+
     case INDEX_op_qemu_ld_i32:
         return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
     case INDEX_op_qemu_ld_i64:
@@ -2190,6 +2195,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
     case INDEX_op_qemu_st_i64:
         return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;
+
     default:
         return NULL;
     }
@@ -649,11 +649,6 @@ static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
         tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
         tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
     } else {
-        /* ret and arg must be different and can't be register at */
-        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
-            tcg_abort();
-        }
-
         tcg_out_bswap_subr(s, bswap32_addr);
         /* delay slot -- never omit the insn, like tcg_out_mov might.  */
         tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
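The fast path above relies on two MIPS32r2 instructions. A portable C model of what that pair computes, as a sanity check (the function name is ours, not part of the patch):

    #include <stdint.h>

    /* Model of the two-insn fast path above: WSBH swaps the bytes within
     * each 16-bit halfword, and ROTR ret, ret, 16 then exchanges the two
     * halfwords, which together is a full 32-bit byte swap. */
    static uint32_t bswap32_model(uint32_t x)
    {
        uint32_t wsbh = ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
        return (wsbh >> 16) | (wsbh << 16);   /* rotate by 16 */
    }
    /* bswap32_model(0x11223344) == 0x44332211 */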
@@ -762,6 +757,55 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
     return false;
 }
 
+static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
+                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
+                            bool cbh, bool is_sub)
+{
+    TCGReg th = TCG_TMP1;
+
+    /* If we have a negative constant such that negating it would
+       make the high part zero, we can (usually) eliminate one insn.  */
+    if (cbl && cbh && bh == -1 && bl != 0) {
+        bl = -bl;
+        bh = 0;
+        is_sub = !is_sub;
+    }
+
+    /* By operating on the high part first, we get to use the final
+       carry operation to move back from the temporary. */
+    if (!cbh) {
+        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
+    } else if (bh != 0 || ah == rl) {
+        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
+    } else {
+        th = ah;
+    }
+
+    /* Note that tcg optimization should eliminate the bl == 0 case.  */
+    if (is_sub) {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
+            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
+        }
+        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
+    } else {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
+        } else if (rl == al && rl == bl) {
+            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
+        }
+        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
+    }
+}
+
 /* Bit 0 set if inversion required; bit 1 set if swapping required.  */
 #define MIPS_CMP_INV  1
 #define MIPS_CMP_SWAP 2
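The newly placed tcg_out_addsub2 emits a double-word add/subtract on a target without a carry flag: SLTU/SLTIU recover the carry or borrow as a 0/1 value. A portable model of the all-register case (the names here are ours, for illustration):

    #include <stdint.h>

    /* Model of the emitted sequence for the register-register case:
     * the carry out of the low-part ADDU is recovered with SLTU
     * (result < operand iff the addition wrapped), then folded into
     * the high part. */
    static void addsub2_model(uint32_t *rl, uint32_t *rh,
                              uint32_t al, uint32_t ah,
                              uint32_t bl, uint32_t bh, int is_sub)
    {
        if (is_sub) {
            uint32_t borrow = al < bl;   /* SLTU TMP0, al, bl */
            *rl = al - bl;               /* SUBU rl, al, bl   */
            *rh = ah - bh - borrow;      /* SUBU th, ah, bh; SUBU rh, th, TMP0 */
        } else {
            uint32_t lo = al + bl;       /* ADDU rl, al, bl   */
            uint32_t carry = lo < bl;    /* SLTU TMP0, rl, bl */
            *rl = lo;
            *rh = ah + bh + carry;       /* ADDU th, ah, bh; ADDU rh, th, TMP0 */
        }
    }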
@@ -1271,7 +1315,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
 
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = lb->oi;
+    TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
     TCGReg v0;
     int i;
@@ -1319,7 +1363,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = lb->oi;
+    TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     int i;
@@ -1424,6 +1468,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         }
         /* FALLTHRU */
     case MO_SL:
+        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
         break;
     case MO_Q | MO_BSWAP:
         if (TCG_TARGET_REG_BITS == 64) {
@@ -1574,55 +1619,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
     }
 }
 
-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
-                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
-                            bool cbh, bool is_sub)
-{
-    TCGReg th = TCG_TMP1;
-
-    /* If we have a negative constant such that negating it would
-       make the high part zero, we can (usually) eliminate one insn.  */
-    if (cbl && cbh && bh == -1 && bl != 0) {
-        bl = -bl;
-        bh = 0;
-        is_sub = !is_sub;
-    }
-
-    /* By operating on the high part first, we get to use the final
-       carry operation to move back from the temporary. */
-    if (!cbh) {
-        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
-    } else if (bh != 0 || ah == rl) {
-        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
-    } else {
-        th = ah;
-    }
-
-    /* Note that tcg optimization should eliminate the bl == 0 case.  */
-    if (is_sub) {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
-            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
-        }
-        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
-    } else {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
-        } else if (rl == al && rl == bl) {
-            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
-        }
-        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
-    }
-}
-
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR;
@@ -1671,22 +1667,12 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
         /* Note that SYNC_MB is a slightly weaker than SYNC 0,
            as the former is an ordering barrier and the latter
           is a completion barrier. */
-        OPC_SYNC_MB,
-        OPC_SYNC_RMB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_RELEASE,
-        OPC_SYNC_ACQUIRE,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_WMB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_RELEASE,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
+        [0 ... TCG_MO_ALL]            = OPC_SYNC_MB,
+        [TCG_MO_LD_LD]                = OPC_SYNC_RMB,
+        [TCG_MO_ST_ST]                = OPC_SYNC_WMB,
+        [TCG_MO_LD_ST]                = OPC_SYNC_RELEASE,
+        [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE,
+        [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE,
     };
     tcg_out32(s, sync[a0 & TCG_MO_ALL]);
 }
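The rewritten sync[] table uses the GNU C range-designator extension: [0 ... TCG_MO_ALL] fills every slot with the conservative full barrier, and the later designators override only the combinations that have a cheaper barrier; the last initializer for a given index wins, so unlisted combinations keep the safe default. A standalone demonstration with our own illustrative values, not the real TCG_MO_* encoding:

    /* Standalone demo of the initializer style (GNU C extension,
     * accepted by GCC and Clang).  Enum values are illustrative. */
    #include <stdio.h>

    enum { MO_LD_LD = 1, MO_ST_ST = 2, MO_ALL = 3 };

    static const char *const barrier[] = {
        [0 ... MO_ALL] = "full",   /* default for every slot */
        [MO_LD_LD]     = "rmb",    /* later designator overrides */
        [MO_ST_ST]     = "wmb",
    };

    int main(void)
    {
        printf("%s %s %s\n", barrier[0], barrier[MO_LD_LD], barrier[MO_ALL]);
        /* prints: full rmb full */
        return 0;
    }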
@@ -2062,7 +2048,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
         }
         break;
-
     case INDEX_op_sar_i64:
         if (c2) {
             tcg_out_dsra(s, a0, a1, a2);
@@ -2210,6 +2195,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;
+
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
@@ -2357,7 +2343,6 @@ static const int tcg_target_callee_save_regs[] = {
 /* The Linux kernel doesn't provide any information about the available
    instruction set. Probe it using a signal handler. */
 
-#include <signal.h>
-
 #ifndef use_movnz_instructions
 bool use_movnz_instructions = false;

File diff suppressed because it is too large
@@ -55,15 +55,10 @@
 #define TCG_REG_TB      TCG_REG_R12
 #define USE_REG_TB      (!(s390_facilities & FACILITY_GEN_INST_EXT))
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_R13
 #endif
 
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif
-
-
 /* All of the following instructions are prefixed with their instruction
    format, and are defined as 8- or 16-bit quantities, even when the two
    halves of the 16-bit quantity may appear 32 bits apart in the insn.
@@ -369,7 +364,6 @@ static void * const qemu_st_helpers[16] = {
 #endif
 
 static tcg_insn_unit *tb_ret_addr;
-
 uint64_t s390_facilities;
 
 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
@@ -1041,6 +1035,7 @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
             return;
         }
     }
+
     /* Use the constant pool if USE_REG_TB, but not for small constants. */
     if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
         if (type == TCG_TYPE_I32) {
@@ -1548,7 +1543,6 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
     unsigned s_bits = opc & MO_SIZE;
     unsigned a_bits = get_alignment_bits(opc);
     unsigned s_mask = (1 << s_bits) - 1;
@@ -1681,9 +1675,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
         tgen_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
-    if (GUEST_BASE < 0x80000) {
+    if (guest_base < 0x80000) {
         *index_reg = TCG_REG_NONE;
-        *disp = GUEST_BASE;
+        *disp = guest_base;
     } else {
         *index_reg = TCG_GUEST_BASE_REG;
         *disp = 0;
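The guest_base checks above replace the old compile-time GUEST_BASE macro with qemu's runtime variable. The 0x80000 bound matches the reach of the s390 long-displacement format; a small model of the test, under the assumption that the displacement field is a signed 20-bit quantity:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumption: s390 long-displacement instructions carry a signed
     * 20-bit displacement, i.e. -0x80000 .. 0x7ffff.  A non-negative
     * guest_base below 0x80000 can therefore be folded directly into
     * the displacement; anything larger goes through the reserved
     * TCG_GUEST_BASE_REG instead. */
    static bool base_fits_disp20(uint64_t guest_base)
    {
        return guest_base < 0x80000;
    }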
@@ -1878,7 +1872,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         } else {
             tcg_out_insn(s, RRF, SRK, a0, a1, a2);
         }
-        tcg_out_insn(s, RR, SR, args[0], args[2]);
         break;
 
     case INDEX_op_and_i32:
@@ -2321,143 +2314,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
-static const TCGTargetOpDef s390_op_defs[] = {
-    { INDEX_op_exit_tb, { } },
-    { INDEX_op_goto_tb, { } },
-    { INDEX_op_br, { } },
-    { INDEX_op_goto_ptr, { "r" } },
-
-    { INDEX_op_ld8u_i32, { "r", "r" } },
-    { INDEX_op_ld8s_i32, { "r", "r" } },
-    { INDEX_op_ld16u_i32, { "r", "r" } },
-    { INDEX_op_ld16s_i32, { "r", "r" } },
-    { INDEX_op_ld_i32, { "r", "r" } },
-    { INDEX_op_st8_i32, { "r", "r" } },
-    { INDEX_op_st16_i32, { "r", "r" } },
-    { INDEX_op_st_i32, { "r", "r" } },
-
-    { INDEX_op_add_i32, { "r", "r", "ri" } },
-    { INDEX_op_sub_i32, { "r", "0", "ri" } },
-    { INDEX_op_mul_i32, { "r", "0", "rK" } },
-
-    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
-
-    { INDEX_op_and_i32, { "r", "0", "ri" } },
-    { INDEX_op_or_i32, { "r", "0", "rO" } },
-    { INDEX_op_xor_i32, { "r", "0", "rX" } },
-
-    { INDEX_op_neg_i32, { "r", "r" } },
-
-    { INDEX_op_shl_i32, { "r", "0", "ri" } },
-    { INDEX_op_shr_i32, { "r", "0", "ri" } },
-    { INDEX_op_sar_i32, { "r", "0", "ri" } },
-
-    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
-    { INDEX_op_rotr_i32, { "r", "r", "ri" } },
-
-    { INDEX_op_ext8s_i32, { "r", "r" } },
-    { INDEX_op_ext8u_i32, { "r", "r" } },
-    { INDEX_op_ext16s_i32, { "r", "r" } },
-    { INDEX_op_ext16u_i32, { "r", "r" } },
-
-    { INDEX_op_bswap16_i32, { "r", "r" } },
-    { INDEX_op_bswap32_i32, { "r", "r" } },
-
-    { INDEX_op_clz_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
-    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },
-
-    { INDEX_op_brcond_i32, { "r", "rC" } },
-    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
-    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
-    { INDEX_op_deposit_i32, { "r", "rZ", "r" } },
-    { INDEX_op_extract_i32, { "r", "r" } },
-
-    { INDEX_op_qemu_ld_i32, { "r", "L" } },
-    { INDEX_op_qemu_ld_i64, { "r", "L" } },
-    { INDEX_op_qemu_st_i32, { "L", "L" } },
-    { INDEX_op_qemu_st_i64, { "L", "L" } },
-
-    { INDEX_op_ld8u_i64, { "r", "r" } },
-    { INDEX_op_ld8s_i64, { "r", "r" } },
-    { INDEX_op_ld16u_i64, { "r", "r" } },
-    { INDEX_op_ld16s_i64, { "r", "r" } },
-    { INDEX_op_ld32u_i64, { "r", "r" } },
-    { INDEX_op_ld32s_i64, { "r", "r" } },
-    { INDEX_op_ld_i64, { "r", "r" } },
-
-    { INDEX_op_st8_i64, { "r", "r" } },
-    { INDEX_op_st16_i64, { "r", "r" } },
-    { INDEX_op_st32_i64, { "r", "r" } },
-    { INDEX_op_st_i64, { "r", "r" } },
-
-    { INDEX_op_add_i64, { "r", "r", "ri" } },
-    { INDEX_op_sub_i64, { "r", "0", "ri" } },
-    { INDEX_op_mul_i64, { "r", "0", "rK" } },
-
-    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
-
-    { INDEX_op_and_i64, { "r", "0", "ri" } },
-    { INDEX_op_or_i64, { "r", "0", "rO" } },
-    { INDEX_op_xor_i64, { "r", "0", "rX" } },
-
-    { INDEX_op_neg_i64, { "r", "r" } },
-
-    { INDEX_op_shl_i64, { "r", "r", "ri" } },
-    { INDEX_op_shr_i64, { "r", "r", "ri" } },
-    { INDEX_op_sar_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
-    { INDEX_op_rotr_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_ext8s_i64, { "r", "r" } },
-    { INDEX_op_ext8u_i64, { "r", "r" } },
-    { INDEX_op_ext16s_i64, { "r", "r" } },
-    { INDEX_op_ext16u_i64, { "r", "r" } },
-    { INDEX_op_ext32s_i64, { "r", "r" } },
-    { INDEX_op_ext32u_i64, { "r", "r" } },
-
-    { INDEX_op_ext_i32_i64, { "r", "r" } },
-    { INDEX_op_extu_i32_i64, { "r", "r" } },
-
-    { INDEX_op_bswap16_i64, { "r", "r" } },
-    { INDEX_op_bswap32_i64, { "r", "r" } },
-    { INDEX_op_bswap64_i64, { "r", "r" } },
-
-    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
-    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
-
-    { INDEX_op_brcond_i64, { "r", "rC" } },
-    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
-    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
-    { INDEX_op_deposit_i64, { "r", "0", "r" } },
-    { INDEX_op_extract_i64, { "r", "r" } },
-
-    { INDEX_op_mb, { } },
-    { -1 },
-};
-
-static void query_s390_facilities(void)
-{
-    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
-
-    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
-       is present on all 64-bit systems, but let's check for it anyway.  */
-    if (hwcap & HWCAP_S390_STFLE) {
-        register int r0 __asm__("0");
-        register void *r1 __asm__("1");
-
-        /* stfle 0(%r1) */
-        r1 = &s390_facilities;
-        asm volatile(".word 0xb2b0,0x1000"
-                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
-    }
-}
-
 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
 {
     static const TCGTargetOpDef r = { 0, { "r" } };
@@ -2465,8 +2321,6 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     static const TCGTargetOpDef r_L = { 0, { "r", "L" } };
     static const TCGTargetOpDef L_L = { 0, { "L", "L" } };
     static const TCGTargetOpDef r_ri = { 0, { "r", "ri" } };
-    static const TCGTargetOpDef r_rC = { 0, { "r", "rC" } };
-    static const TCGTargetOpDef r_rZ = { 0, { "r", "rZ" } };
     static const TCGTargetOpDef r_r_ri = { 0, { "r", "r", "ri" } };
     static const TCGTargetOpDef r_0_ri = { 0, { "r", "0", "ri" } };
     static const TCGTargetOpDef r_0_rI = { 0, { "r", "0", "rI" } };
@@ -2538,10 +2392,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &r_r_ri;
 
     case INDEX_op_brcond_i32:
-        /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-        return (s390_facilities & FACILITY_EXT_IMM ? &r_ri : &r_rZ);
     case INDEX_op_brcond_i64:
-        return (s390_facilities & FACILITY_EXT_IMM ? &r_rC : &r_rZ);
+        return &r_ri;
 
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -2567,6 +2419,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &r_r;
 
     case INDEX_op_clz_i64:
+    case INDEX_op_setcond_i32:
+    case INDEX_op_setcond_i64:
         return &r_r_ri;
 
     case INDEX_op_qemu_ld_i32:
@@ -2575,31 +2429,19 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_st_i64:
     case INDEX_op_qemu_st_i32:
         return &L_L;
+
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
         {
             static const TCGTargetOpDef dep = { 0, { "r", "rZ", "r" } };
            return &dep;
        }
-    case INDEX_op_setcond_i32:
-    case INDEX_op_setcond_i64:
-        {
-            /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-            static const TCGTargetOpDef setc_z = { 0, { "r", "r", "rZ" } };
-            static const TCGTargetOpDef setc_c = { 0, { "r", "r", "rC" } };
-            return (s390_facilities & FACILITY_EXT_IMM ? &setc_c : &setc_z);
-        }
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
         {
-            /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-            static const TCGTargetOpDef movc_z = { 0, { "r", "r", "rZ", "r", "0" } };
-            static const TCGTargetOpDef movc_c = { 0, { "r", "r", "rC", "r", "0" } };
-            static const TCGTargetOpDef movc_l = { 0, { "r", "r", "rC", "rI", "0" } };
-            return (s390_facilities & FACILITY_EXT_IMM
-                    ? (s390_facilities & FACILITY_LOAD_ON_COND2
-                       ? &movc_l : &movc_c)
-                    : &movc_z);
+            static const TCGTargetOpDef movc = { 0, { "r", "r", "ri", "r", "0" } };
+            static const TCGTargetOpDef movc_l = { 0, { "r", "r", "ri", "rI", "0" } };
+            return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
         }
     case INDEX_op_div2_i32:
     case INDEX_op_div2_i64:
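In the constraint tables above, each string describes one operand: "r" is any register, "ri" register or immediate, "rI"/"rC" register or a backend-specific immediate class, "rZ" register or the constant zero, and "0" ties the operand to operand 0's register. A self-contained sketch of the shape of these entries, with our own stand-in struct rather than qemu's real TCGTargetOpDef:

    /* Stand-in for qemu's TCGTargetOpDef, to show the shape of the
     * entries above: an opcode plus one constraint string per operand.
     * The struct and field names here are illustrative. */
    typedef struct {
        int opcode;                  /* TCGOpcode in the real code */
        const char *args_ct_str[5];  /* per-operand constraints   */
    } OpDefSketch;

    /* movcond: output, two compare inputs, two move sources; the "0"
     * ties the last input to the output register, since a conditional
     * load overwrites its destination. */
    static const OpDefSketch movc = { 0, { "r", "r", "ri", "r", "0" } };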
@@ -2628,6 +2470,23 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return NULL;
     }
 
+static void query_s390_facilities(void)
+{
+    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+
+    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
+       is present on all 64-bit systems, but let's check for it anyway.  */
+    if (hwcap & HWCAP_S390_STFLE) {
+        register int r0 __asm__("0");
+        register void *r1 __asm__("1");
+
+        /* stfle 0(%r1) */
+        r1 = &s390_facilities;
+        asm volatile(".word 0xb2b0,0x1000"
+                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
+    }
+}
+
 static void tcg_target_init(TCGContext *s)
 {
     query_s390_facilities();
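query_s390_facilities reads the kernel-reported hardware capabilities before issuing STFLE. Outside qemu's qemu_getauxval wrapper, the same probe can be done with glibc's getauxval; a minimal Linux-only sketch (we just dump the raw AT_HWCAP bits rather than guess at individual facility-bit names):

    /* Minimal user-space probe in the spirit of the function above,
     * using glibc's getauxval directly. */
    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);
        printf("AT_HWCAP = %#lx\n", hwcap);
        return 0;
    }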
@@ -2682,13 +2541,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
 #endif
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
-    /* br %r3 (go to TB) */
-    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
     if (USE_REG_TB) {
         tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
                     tcg_target_call_iarg_regs[1]);
     }
 
+    /* br %r3 (go to TB) */
+    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
+
     /*
      * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
      * and fall through to the rest of the epilogue.
@@ -83,10 +83,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #define TCG_REG_T1  TCG_REG_G1
 #define TCG_REG_T2  TCG_REG_O7
 
-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 # define TCG_GUEST_BASE_REG TCG_REG_I5
-#else
-# define TCG_GUEST_BASE_REG TCG_REG_G0
 #endif
 
 #define TCG_REG_TB  TCG_REG_I1
@@ -311,7 +309,6 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
     case R_SPARC_WDISP19:
         assert(check_fit_ptr(pcrel >> 2, 19));
         insn &= ~INSN_OFF19(-1);
-        insn |= INSN_OFF19(value);
         insn |= INSN_OFF19(pcrel);
         break;
     case R_SPARC_13:
@@ -1207,7 +1204,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_insn_unit *func;
     tcg_insn_unit *label_ptr;
 
-    addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE,
+    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                              offsetof(CPUTLBEntry, addr_read));
 
     /* The fast path is exactly one insn.  Thus we can perform the
@@ -1274,7 +1271,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1289,7 +1286,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_insn_unit *func;
     tcg_insn_unit *label_ptr;
 
-    addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE,
+    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                              offsetof(CPUTLBEntry, addr_write));
 
     /* The fast path is exactly one insn.  Thus we can perform the entire
@@ -1329,7 +1326,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1662,6 +1659,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;
+
     case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32: