mirror of https://github.com/yuzu-emu/unicorn.git (synced 2025-03-08)

tcg: Update all other target source files to be in sync with qemu

parent 2e5cfbcfbf, commit 83398bf99c
@@ -144,5 +144,6 @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
 #ifdef CONFIG_SOFTMMU
 #define TCG_TARGET_NEED_LDST_LABELS
 #endif
+#define TCG_TARGET_NEED_POOL_LABELS

 #endif
@@ -89,14 +89,14 @@ static const int tcg_target_call_oarg_regs[2] = {
 enum arm_cond_code_e {
     COND_EQ = 0x0,
     COND_NE = 0x1,
     COND_CS = 0x2,  /* Unsigned greater or equal */
     COND_CC = 0x3,  /* Unsigned less than */
     COND_MI = 0x4,  /* Negative */
     COND_PL = 0x5,  /* Zero or greater */
     COND_VS = 0x6,  /* Overflow */
     COND_VC = 0x7,  /* No overflow */
     COND_HI = 0x8,  /* Unsigned greater than */
     COND_LS = 0x9,  /* Unsigned less or equal */
     COND_GE = 0xa,
     COND_LT = 0xb,
     COND_GT = 0xc,
@@ -106,14 +106,14 @@ enum arm_cond_code_e {

 #define TO_CPSR (1 << 20)

 #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
 #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
 #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
 #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
 #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
 #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
 #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
 #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)

 typedef enum {
     ARITH_AND = 0x0 << 21,
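These operand-2 macros pack ARM's shifter field: an immediate shift amount lives in bits [11:7] with the shift type in bits [6:5], while a register shift puts the register number in bits [11:8] and sets bit 4. A standalone sketch (macros copied from the hunk above, expected values worked out by hand):

    #include <stdio.h>

    #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
    #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
    #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)

    int main(void)
    {
        printf("LSL #3  -> 0x%03x\n", SHIFT_IMM_LSL(3));   /* 0x180 */
        printf("ROR #16 -> 0x%03x\n", SHIFT_IMM_ROR(16));  /* 0x860 */
        printf("LSR r2  -> 0x%03x\n", SHIFT_REG_LSR(2));   /* 0x230 */
        return 0;
    }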
@@ -1242,7 +1242,7 @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
     }
 }

 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

 /* We're expecting to use an 8-bit immediate and to mask. */
 QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8);
@@ -2026,6 +2026,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                         ARITH_MOV, args[0], 0, 0);
         break;

     case INDEX_op_brcond2_i32:
         c = tcg_out_cmp2(s, args, const_args);
         tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));

@@ -2125,6 +2126,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;

     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:

@@ -2143,6 +2145,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extract_i32:
     case INDEX_op_sextract_i32:
         return &r_r;

     case INDEX_op_add_i32:
     case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:

@@ -2168,6 +2171,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
         return &r_r_ri;

     case INDEX_op_brcond_i32:
         return &br;
     case INDEX_op_deposit_i32:

@@ -2182,6 +2186,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &br2;
     case INDEX_op_setcond2_i32:
         return &setc2;

     case INDEX_op_qemu_ld_i32:
         return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
     case INDEX_op_qemu_ld_i64:

@@ -2190,6 +2195,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
     case INDEX_op_qemu_st_i64:
         return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

     default:
         return NULL;
     }
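For readers new to these tcg_target_op_def hunks: each returned descriptor holds one constraint string per operand, where "r" means any register, "ri" register or immediate, "0" an operand that must alias output operand 0, and letters like "l"/"s" are backend-specific register classes for the qemu_ld/qemu_st slow paths. A simplified, hypothetical model of the lookup (not the real tcg.h types):

    #include <string.h>

    /* Hypothetical stand-in for TCGTargetOpDef: one constraint string
       per operand. */
    typedef struct {
        const char *args_ct_str[4];
    } OpDef;

    static const OpDef *op_def(const char *op)
    {
        static const OpDef r_r_ri = { { "r", "r", "ri" } }; /* 3-address op */
        static const OpDef r_0_ri = { { "r", "0", "ri" } }; /* dest aliases in0 */

        /* Two-address targets force the destination onto input 0 ("0"). */
        return strcmp(op, "sub_i32") == 0 ? &r_0_ri : &r_r_ri;
    }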
@@ -649,11 +649,6 @@ static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
         tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
         tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
     } else {
-        /* ret and arg must be different and can't be register at */
-        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
-            tcg_abort();
-        }
-
         tcg_out_bswap_subr(s, bswap32_addr);
         /* delay slot -- never omit the insn, like tcg_out_mov might. */
         tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
@@ -762,6 +757,55 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
     return false;
 }

+static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
+                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
+                            bool cbh, bool is_sub)
+{
+    TCGReg th = TCG_TMP1;
+
+    /* If we have a negative constant such that negating it would
+       make the high part zero, we can (usually) eliminate one insn. */
+    if (cbl && cbh && bh == -1 && bl != 0) {
+        bl = -bl;
+        bh = 0;
+        is_sub = !is_sub;
+    }
+
+    /* By operating on the high part first, we get to use the final
+       carry operation to move back from the temporary. */
+    if (!cbh) {
+        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
+    } else if (bh != 0 || ah == rl) {
+        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
+    } else {
+        th = ah;
+    }
+
+    /* Note that tcg optimization should eliminate the bl == 0 case. */
+    if (is_sub) {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
+            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
+        }
+        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
+    } else {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
+        } else if (rl == al && rl == bl) {
+            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
+        }
+        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
+    }
+}
+
 /* Bit 0 set if inversion required; bit 1 set if swapping required. */
 #define MIPS_CMP_INV 1
 #define MIPS_CMP_SWAP 2
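The carry handling above hinges on SLTU/SLTIU: after an unsigned add rl = al + bl, the result wrapped around exactly when rl < bl, so a set-on-less-than-unsigned recovers the carry bit without a flags register. A minimal C sketch of the same double-word add, assuming 32-bit halves:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the ADDU/SLTU sequence: the unsigned compare is the carry. */
    static void add64(uint32_t *rl, uint32_t *rh,
                      uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh)
    {
        uint32_t lo = al + bl;
        uint32_t carry = lo < bl;   /* OPC_SLTU: sum wrapped iff sum < addend */
        *rl = lo;
        *rh = ah + bh + carry;      /* high part, then fold the carry in */
    }

    int main(void)
    {
        uint32_t lo, hi;
        add64(&lo, &hi, 0xffffffffu, 0x0, 0x1, 0x0);
        printf("0x%08x%08x\n", hi, lo);   /* prints 0x0000000100000000 */
        return 0;
    }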
@@ -1271,7 +1315,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,

 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = lb->oi;
+    TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
     TCGReg v0;
     int i;
@@ -1319,7 +1363,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = lb->oi;
+    TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     int i;
@@ -1424,6 +1468,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         }
         /* FALLTHRU */
     case MO_SL:
         tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
         break;
     case MO_Q | MO_BSWAP:
         if (TCG_TARGET_REG_BITS == 64) {
@@ -1574,55 +1619,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
     }
 }

-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
-                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
-                            bool cbh, bool is_sub)
-{
-    TCGReg th = TCG_TMP1;
-
-    /* If we have a negative constant such that negating it would
-       make the high part zero, we can (usually) eliminate one insn. */
-    if (cbl && cbh && bh == -1 && bl != 0) {
-        bl = -bl;
-        bh = 0;
-        is_sub = !is_sub;
-    }
-
-    /* By operating on the high part first, we get to use the final
-       carry operation to move back from the temporary. */
-    if (!cbh) {
-        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
-    } else if (bh != 0 || ah == rl) {
-        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
-    } else {
-        th = ah;
-    }
-
-    /* Note that tcg optimization should eliminate the bl == 0 case. */
-    if (is_sub) {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
-            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
-        }
-        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
-    } else {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
-        } else if (rl == al && rl == bl) {
-            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
-        }
-        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
-    }
-}
-
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR;
@@ -1671,22 +1667,12 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
         /* Note that SYNC_MB is a slightly weaker than SYNC 0,
            as the former is an ordering barrier and the latter
            is a completion barrier. */
-        OPC_SYNC_MB,
-        OPC_SYNC_RMB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_RELEASE,
-        OPC_SYNC_ACQUIRE,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_WMB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_RELEASE,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
-        OPC_SYNC_MB,
+        [0 ... TCG_MO_ALL]            = OPC_SYNC_MB,
+        [TCG_MO_LD_LD]                = OPC_SYNC_RMB,
+        [TCG_MO_ST_ST]                = OPC_SYNC_WMB,
+        [TCG_MO_LD_ST]                = OPC_SYNC_RELEASE,
+        [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE,
+        [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE,
     };
     tcg_out32(s, sync[a0 & TCG_MO_ALL]);
 }
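The rewritten sync[] table uses C99 designated initializers together with GCC/Clang's [lo ... hi] range extension: the range lays down a full-barrier default for every index, and the later entries override the slots where a weaker MIPS SYNC variant suffices. A standalone sketch of the pattern (the MO_* values here are illustrative, not qemu's actual TCG_MO_* encoding):

    #include <stdio.h>

    enum { MO_LD_LD = 1, MO_ST_ST = 2, MO_LD_ST = 4, MO_ALL = 7 };

    /* Range designator fills every slot; later designators override. */
    static const char *const sync[] = {
        [0 ... MO_ALL]        = "SYNC_MB",      /* default: full barrier */
        [MO_LD_LD]            = "SYNC_RMB",
        [MO_ST_ST]            = "SYNC_WMB",
        [MO_LD_ST]            = "SYNC_RELEASE",
        [MO_LD_ST | MO_ST_ST] = "SYNC_RELEASE",
        [MO_LD_ST | MO_LD_LD] = "SYNC_ACQUIRE",
    };

    int main(void)
    {
        printf("%s\n", sync[MO_LD_LD]);             /* SYNC_RMB */
        printf("%s\n", sync[MO_LD_LD | MO_ST_ST]);  /* SYNC_MB (default) */
        return 0;
    }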
@@ -2062,7 +2048,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
         }
         break;

     case INDEX_op_sar_i64:
         if (c2) {
             tcg_out_dsra(s, a0, a1, a2);
@@ -2210,6 +2195,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;

     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
@@ -2357,7 +2343,6 @@ static const int tcg_target_callee_save_regs[] = {
 /* The Linux kernel doesn't provide any information about the available
    instruction set. Probe it using a signal handler. */

 #include <signal.h>

 #ifndef use_movnz_instructions
 bool use_movnz_instructions = false;

[File diff suppressed because it is too large]

@@ -55,15 +55,10 @@
+#define TCG_REG_TB TCG_REG_R12
+#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))

-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_R13
 #endif

-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif

 /* All of the following instructions are prefixed with their instruction
    format, and are defined as 8- or 16-bit quantities, even when the two
    halves of the 16-bit quantity may appear 32 bits apart in the insn.
@@ -369,7 +364,6 @@ static void * const qemu_st_helpers[16] = {
 #endif

 static tcg_insn_unit *tb_ret_addr;

 uint64_t s390_facilities;

 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
@@ -1041,6 +1035,7 @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
             return;
         }
     }

+    /* Use the constant pool if USE_REG_TB, but not for small constants. */
     if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
         if (type == TCG_TYPE_I32) {
@@ -1548,7 +1543,6 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    unsigned s_bits = opc & MO_SIZE;
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_mask = (1 << s_bits) - 1;
@@ -1681,9 +1675,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
         tgen_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
-    if (GUEST_BASE < 0x80000) {
+    if (guest_base < 0x80000) {
         *index_reg = TCG_REG_NONE;
-        *disp = GUEST_BASE;
+        *disp = guest_base;
     } else {
         *index_reg = TCG_GUEST_BASE_REG;
         *disp = 0;
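The guest_base < 0x80000 test reflects the s390 long-displacement (RXY) instruction formats, whose displacement field is a 20-bit signed value; a base below 2^19 fits directly in the field, while anything larger needs the dedicated base register. A small sketch of that range check:

    #include <stdbool.h>
    #include <stdint.h>

    /* 20-bit signed displacement: [-0x80000, 0x7ffff]. A non-negative
       guest_base therefore fits iff it is below 0x80000. */
    static bool fits_disp20(int64_t disp)
    {
        return disp >= -0x80000 && disp <= 0x7ffff;
    }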
@@ -1878,7 +1872,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         } else {
             tcg_out_insn(s, RRF, SRK, a0, a1, a2);
         }
-        tcg_out_insn(s, RR, SR, args[0], args[2]);
         break;

     case INDEX_op_and_i32:
@@ -2321,143 +2314,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }

-static const TCGTargetOpDef s390_op_defs[] = {
-    { INDEX_op_exit_tb, { } },
-    { INDEX_op_goto_tb, { } },
-    { INDEX_op_br, { } },
-    { INDEX_op_goto_ptr, { "r" } },
-
-    { INDEX_op_ld8u_i32, { "r", "r" } },
-    { INDEX_op_ld8s_i32, { "r", "r" } },
-    { INDEX_op_ld16u_i32, { "r", "r" } },
-    { INDEX_op_ld16s_i32, { "r", "r" } },
-    { INDEX_op_ld_i32, { "r", "r" } },
-    { INDEX_op_st8_i32, { "r", "r" } },
-    { INDEX_op_st16_i32, { "r", "r" } },
-    { INDEX_op_st_i32, { "r", "r" } },
-
-    { INDEX_op_add_i32, { "r", "r", "ri" } },
-    { INDEX_op_sub_i32, { "r", "0", "ri" } },
-    { INDEX_op_mul_i32, { "r", "0", "rK" } },
-
-    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
-
-    { INDEX_op_and_i32, { "r", "0", "ri" } },
-    { INDEX_op_or_i32, { "r", "0", "rO" } },
-    { INDEX_op_xor_i32, { "r", "0", "rX" } },
-
-    { INDEX_op_neg_i32, { "r", "r" } },
-
-    { INDEX_op_shl_i32, { "r", "0", "ri" } },
-    { INDEX_op_shr_i32, { "r", "0", "ri" } },
-    { INDEX_op_sar_i32, { "r", "0", "ri" } },
-
-    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
-    { INDEX_op_rotr_i32, { "r", "r", "ri" } },
-
-    { INDEX_op_ext8s_i32, { "r", "r" } },
-    { INDEX_op_ext8u_i32, { "r", "r" } },
-    { INDEX_op_ext16s_i32, { "r", "r" } },
-    { INDEX_op_ext16u_i32, { "r", "r" } },
-
-    { INDEX_op_bswap16_i32, { "r", "r" } },
-    { INDEX_op_bswap32_i32, { "r", "r" } },
-
-    { INDEX_op_clz_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
-    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },
-
-    { INDEX_op_brcond_i32, { "r", "rC" } },
-    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
-    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
-    { INDEX_op_deposit_i32, { "r", "rZ", "r" } },
-    { INDEX_op_extract_i32, { "r", "r" } },
-
-    { INDEX_op_qemu_ld_i32, { "r", "L" } },
-    { INDEX_op_qemu_ld_i64, { "r", "L" } },
-    { INDEX_op_qemu_st_i32, { "L", "L" } },
-    { INDEX_op_qemu_st_i64, { "L", "L" } },
-
-    { INDEX_op_ld8u_i64, { "r", "r" } },
-    { INDEX_op_ld8s_i64, { "r", "r" } },
-    { INDEX_op_ld16u_i64, { "r", "r" } },
-    { INDEX_op_ld16s_i64, { "r", "r" } },
-    { INDEX_op_ld32u_i64, { "r", "r" } },
-    { INDEX_op_ld32s_i64, { "r", "r" } },
-    { INDEX_op_ld_i64, { "r", "r" } },
-
-    { INDEX_op_st8_i64, { "r", "r" } },
-    { INDEX_op_st16_i64, { "r", "r" } },
-    { INDEX_op_st32_i64, { "r", "r" } },
-    { INDEX_op_st_i64, { "r", "r" } },
-
-    { INDEX_op_add_i64, { "r", "r", "ri" } },
-    { INDEX_op_sub_i64, { "r", "0", "ri" } },
-    { INDEX_op_mul_i64, { "r", "0", "rK" } },
-
-    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
-    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
-
-    { INDEX_op_and_i64, { "r", "0", "ri" } },
-    { INDEX_op_or_i64, { "r", "0", "rO" } },
-    { INDEX_op_xor_i64, { "r", "0", "rX" } },
-
-    { INDEX_op_neg_i64, { "r", "r" } },
-
-    { INDEX_op_shl_i64, { "r", "r", "ri" } },
-    { INDEX_op_shr_i64, { "r", "r", "ri" } },
-    { INDEX_op_sar_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
-    { INDEX_op_rotr_i64, { "r", "r", "ri" } },
-
-    { INDEX_op_ext8s_i64, { "r", "r" } },
-    { INDEX_op_ext8u_i64, { "r", "r" } },
-    { INDEX_op_ext16s_i64, { "r", "r" } },
-    { INDEX_op_ext16u_i64, { "r", "r" } },
-    { INDEX_op_ext32s_i64, { "r", "r" } },
-    { INDEX_op_ext32u_i64, { "r", "r" } },
-
-    { INDEX_op_ext_i32_i64, { "r", "r" } },
-    { INDEX_op_extu_i32_i64, { "r", "r" } },
-
-    { INDEX_op_bswap16_i64, { "r", "r" } },
-    { INDEX_op_bswap32_i64, { "r", "r" } },
-    { INDEX_op_bswap64_i64, { "r", "r" } },
-
-    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
-    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
-
-    { INDEX_op_brcond_i64, { "r", "rC" } },
-    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
-    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
-    { INDEX_op_deposit_i64, { "r", "0", "r" } },
-    { INDEX_op_extract_i64, { "r", "r" } },
-
-    { INDEX_op_mb, { } },
-    { -1 },
-};
-
-static void query_s390_facilities(void)
-{
-    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
-
-    /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
-       is present on all 64-bit systems, but let's check for it anyway. */
-    if (hwcap & HWCAP_S390_STFLE) {
-        register int r0 __asm__("0");
-        register void *r1 __asm__("1");
-
-        /* stfle 0(%r1) */
-        r1 = &s390_facilities;
-        asm volatile(".word 0xb2b0,0x1000"
-                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
-    }
-}
-
 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
 {
     static const TCGTargetOpDef r = { 0, { "r" } };
@@ -2465,8 +2321,6 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     static const TCGTargetOpDef r_L = { 0, { "r", "L" } };
     static const TCGTargetOpDef L_L = { 0, { "L", "L" } };
     static const TCGTargetOpDef r_ri = { 0, { "r", "ri" } };
-    static const TCGTargetOpDef r_rC = { 0, { "r", "rC" } };
-    static const TCGTargetOpDef r_rZ = { 0, { "r", "rZ" } };
     static const TCGTargetOpDef r_r_ri = { 0, { "r", "r", "ri" } };
     static const TCGTargetOpDef r_0_ri = { 0, { "r", "0", "ri" } };
     static const TCGTargetOpDef r_0_rI = { 0, { "r", "0", "rI" } };
@@ -2538,10 +2392,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &r_r_ri;

     case INDEX_op_brcond_i32:
-        /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-        return (s390_facilities & FACILITY_EXT_IMM ? &r_ri : &r_rZ);
     case INDEX_op_brcond_i64:
-        return (s390_facilities & FACILITY_EXT_IMM ? &r_rC : &r_rZ);
+        return &r_ri;

     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -2567,6 +2419,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &r_r;

     case INDEX_op_clz_i64:
+    case INDEX_op_setcond_i32:
+    case INDEX_op_setcond_i64:
         return &r_r_ri;

     case INDEX_op_qemu_ld_i32:
@@ -2575,31 +2429,19 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_st_i64:
     case INDEX_op_qemu_st_i32:
         return &L_L;

     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
         {
             static const TCGTargetOpDef dep = { 0, { "r", "rZ", "r" } };
             return &dep;
         }
-    case INDEX_op_setcond_i32:
-    case INDEX_op_setcond_i64:
-        {
-            /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-            static const TCGTargetOpDef setc_z = { 0, { "r", "r", "rZ" } };
-            static const TCGTargetOpDef setc_c = { 0, { "r", "r", "rC" } };
-            return (s390_facilities & FACILITY_EXT_IMM ? &setc_c : &setc_z);
-        }
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
         {
-            /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
-            static const TCGTargetOpDef movc_z = { 0, { "r", "r", "rZ", "r", "0" } };
-            static const TCGTargetOpDef movc_c = { 0, { "r", "r", "rC", "r", "0" } };
-            static const TCGTargetOpDef movc_l = { 0, { "r", "r", "rC", "rI", "0" } };
-            return (s390_facilities & FACILITY_EXT_IMM
-                    ? (s390_facilities & FACILITY_LOAD_ON_COND2
-                       ? &movc_l : &movc_c)
-                    : &movc_z);
+            static const TCGTargetOpDef movc = { 0, { "r", "r", "ri", "r", "0" } };
+            static const TCGTargetOpDef movc_l = { 0, { "r", "r", "ri", "rI", "0" } };
+            return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
         }
     case INDEX_op_div2_i32:
     case INDEX_op_div2_i64:
@@ -2628,6 +2470,23 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return NULL;
     }
 }

+static void query_s390_facilities(void)
+{
+    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+
+    /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
+       is present on all 64-bit systems, but let's check for it anyway. */
+    if (hwcap & HWCAP_S390_STFLE) {
+        register int r0 __asm__("0");
+        register void *r1 __asm__("1");
+
+        /* stfle 0(%r1) */
+        r1 = &s390_facilities;
+        asm volatile(".word 0xb2b0,0x1000"
+                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
+    }
+}
+
 static void tcg_target_init(TCGContext *s)
 {
     query_s390_facilities();
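The same HWCAP probe can be reproduced outside qemu with glibc's getauxval(3), which reads AT_HWCAP directly. A minimal, Linux-only sketch (the HWCAP_S390_STFLE fallback value is an assumption taken from the kernel's s390 hwcap definitions):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_S390_STFLE
    #define HWCAP_S390_STFLE 4   /* assumed: bit 2 of the s390 AT_HWCAP word */
    #endif

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* Same gate the backend applies before executing STFLE. */
        if (hwcap & HWCAP_S390_STFLE) {
            puts("STFLE available: facility bits can be queried");
        } else {
            puts("no STFLE: assume a baseline instruction set");
        }
        return 0;
    }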
@@ -2682,13 +2541,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
 #endif

     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
-    /* br %r3 (go to TB) */
-    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
+    if (USE_REG_TB) {
+        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
+                    tcg_target_call_iarg_regs[1]);
+    }

+    /* br %r3 (go to TB) */
+    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

     /*
      * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
      * and fall through to the rest of the epilogue.

@@ -83,10 +83,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #define TCG_REG_T1 TCG_REG_G1
 #define TCG_REG_T2 TCG_REG_O7

-#ifdef CONFIG_USE_GUEST_BASE
+#ifndef CONFIG_SOFTMMU
 # define TCG_GUEST_BASE_REG TCG_REG_I5
 #else
 # define TCG_GUEST_BASE_REG TCG_REG_G0
 #endif

 #define TCG_REG_TB TCG_REG_I1
@@ -311,7 +309,6 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
     case R_SPARC_WDISP19:
         assert(check_fit_ptr(pcrel >> 2, 19));
         insn &= ~INSN_OFF19(-1);
-        insn |= INSN_OFF19(value);
+        insn |= INSN_OFF19(pcrel);
         break;
     case R_SPARC_13:
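The INSN_OFF19(value) -> INSN_OFF19(pcrel) change above is a real fix: WDISP19 is a PC-relative branch field, so the displacement from the instruction, not the absolute value, must be inserted. A generic sketch of the clear-then-insert idiom, with a hypothetical OFF19 macro modeled on INSN_OFF19 (word-granular, 19 bits):

    #include <stdint.h>

    /* Hypothetical 19-bit word-offset field, modeled on INSN_OFF19. */
    #define OFF19(x) ((((uint32_t)(x)) >> 2) & 0x7ffff)

    static uint32_t patch_off19(uint32_t insn, intptr_t target, intptr_t pc)
    {
        intptr_t pcrel = target - pc;   /* branch fields are PC-relative */
        insn &= ~OFF19(-1);             /* clear the old displacement bits */
        insn |= OFF19(pcrel);           /* insert the new one */
        return insn;
    }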
@@ -421,7 +418,7 @@ static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
 }

 static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                            int32_t val2, int val2const, int op)
 {
     tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
               | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
@@ -751,7 +748,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
         }
         c1 = TCG_REG_G0, c2const = 0;
         cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
         break;

     case TCG_COND_GTU:
     case TCG_COND_LEU:
@@ -852,16 +849,16 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
         }
         tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
     } else if (bh == TCG_REG_G0) {
         /* If we have a zero, we can perform the operation in two insns,
            with the arithmetic first, and a conditional move into place. */
         if (rh == ah) {
             tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                            is_sub ? ARITH_SUB : ARITH_ADD);
             tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
         } else {
             tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
             tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
         }
     } else {
         /* Otherwise adjust BH as if there is carry into T2 ... */
         if (bhconst) {
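When one high-part addend is %g0, the code above avoids ADDXC by computing the adjusted high result up front and selecting it with a conditional move on the xcc carry condition (LTU/GEU). In C, the add case of that selection looks roughly like this:

    #include <stdint.h>

    /* bh == 0 case: add the low halves, then conditionally bump the high
       half, mirroring the arithmetic-plus-MOVCC sequence above. */
    static void add64_bh_zero(uint64_t *rl, uint64_t *rh,
                              uint64_t al, uint64_t ah, uint64_t bl)
    {
        uint64_t lo = al + bl;
        int carry = lo < bl;           /* TCG_COND_LTU on the low-part add */
        *rl = lo;
        *rh = carry ? ah + 1 : ah;     /* MOVCC selects ah or ah + 1 */
    }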
@@ -872,7 +869,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
         }
         /* ... smoosh T2 back to original BH if carry is clear ... */
         tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
         /* ... and finally perform the arithmetic with the new operand. */
         tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
     }
@@ -1022,7 +1019,7 @@ static void build_trampolines(TCGContext *s)
             /* Skip the oi argument. */
             ra += 1;
         }

         /* Set the retaddr operand. */
         if (ra >= TCG_REG_O6) {
             tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
@@ -1132,7 +1129,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,

     /* Mask the tlb index. */
     tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

     /* Mask page, part 2. */
     tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);
@@ -1207,7 +1204,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_insn_unit *func;
     tcg_insn_unit *label_ptr;

-    addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE,
+    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                              offsetof(CPUTLBEntry, addr_read));

     /* The fast path is exactly one insn. Thus we can perform the
@@ -1274,7 +1271,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1289,7 +1286,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_insn_unit *func;
     tcg_insn_unit *label_ptr;

-    addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE,
+    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                              offsetof(CPUTLBEntry, addr_write));

     /* The fast path is exactly one insn. Thus we can perform the entire
@@ -1329,7 +1326,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1487,11 +1484,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         goto gen_arith;

     OP_32_64(neg):
         c = ARITH_SUB;
         goto gen_arith1;
     OP_32_64(not):
         c = ARITH_ORN;
         goto gen_arith1;

     case INDEX_op_div_i32:
         tcg_out_div32(s, a0, a1, a2, c2, 0);
@@ -1618,8 +1615,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;

 gen_arith1:
     tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
     break;

     case INDEX_op_mb:
         tcg_out_mb(s, a0);
@@ -1662,6 +1659,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     switch (op) {
     case INDEX_op_goto_ptr:
         return &r;

     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32: