2015-08-21 07:04:50 +00:00
|
|
|
#ifndef TARGET_ARM_TRANSLATE_H
|
|
|
|
#define TARGET_ARM_TRANSLATE_H
|
|
|
|
|
|
|
|
/* internal defines */
|
|
|
|
/* Per-translation-block decoder state for the ARM front end.  One of these
 * is live while a TranslationBlock is being translated.
 */
typedef struct DisasContext {
    target_ulong pc;        /* guest PC of the insn currently being decoded */
    uint32_t insn;          /* raw encoding of the current insn */
    int is_jmp;             /* DISAS_* code describing how the TB will end */
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;  /* the TB being translated */
    int singlestep_enabled;       /* gdbstub single-step in effect */
    int thumb;                    /* nonzero when decoding Thumb encodings */
    int sctlr_b;                  /* SCTLR.B bit -- presumably BE32 data handling; confirm in translate.c */
    TCGMemOp be_data;             /* endianness to use for data accesses */
#if !defined(CONFIG_USER_ONLY)
    int user;                     /* nonzero when translating unprivileged code */
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    bool tbi0;         /* TBI0 for EL0/1 or TBI for EL2/3 */
    bool tbi1;         /* TBI1 for EL0/1, not used for EL2/3 */
    bool ns;           /* Use non-secure CPREG bank on access */
    int fp_excp_el;    /* FP exception EL or 0 if enabled */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;      /* VFP short-vector length -- TODO confirm against FPSCR decode */
    int vec_stride;   /* VFP short-vector stride -- TODO confirm against FPSCR decode */
    bool v7m_handler_mode;  /* true if executing in M-profile Handler mode */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int aarch64;       /* nonzero when translating AArch64 code */
    int current_el;    /* exception level the translated code runs at */
    GHashTable *cp_regs;   /* coprocessor/system register table for this CPU */
    uint64_t features; /* CPU features bits */
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if a single-step exception will be taken to the current EL */
    bool ss_same_el;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op index of the current insn_start. */
    int insn_start_idx;
    /* Scratch pool of A64 TCG temporaries; see tmp_a64_count/tmp_a64. */
#define TMP_A64_MAX 16
    int tmp_a64_count;
    TCGv_i64 tmp_a64[TMP_A64_MAX];

    // Unicorn engine
    struct uc_struct *uc;
} DisasContext;
|
|
|
|
|
2018-02-11 04:42:08 +00:00
|
|
|
/* A materialized ARM condition-code test: a TCG comparison condition plus
 * the value it applies to.  Filled in by arm_test_cc() and released with
 * arm_free_cc().
 */
typedef struct DisasCompare {
    TCGCond cond;       /* TCG comparison condition */
    TCGv_i32 value;     /* operand the condition is applied to -- presumably compared against zero; confirm in translate.c */
    bool value_global;  /* true if "value" is a global temp and must not be freed */
} DisasCompare;
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
/* Return 1 if the CPU feature bit "feature" is set in dc->features,
 * 0 otherwise.
 */
static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    uint64_t mask = 1ULL << feature;

    return (dc->features & mask) ? 1 : 0;
}
|
|
|
|
|
|
|
|
/* Map the ARM MMU index of the current context onto the core memory
 * index used by the TCG load/store ops.
 */
static inline int get_mem_index(DisasContext *s)
{
    ARMMMUIdx arm_idx = s->mmu_idx;

    return arm_to_core_mmu_idx(arm_idx);
}
|
|
|
|
|
2018-02-13 03:08:30 +00:00
|
|
|
/* Function used to determine the target exception EL when otherwise not known
|
|
|
|
* or default.
|
|
|
|
*/
|
|
|
|
static inline int default_exception_el(DisasContext *s)
|
|
|
|
{
|
|
|
|
/* If we are coming from secure EL0 in a system with a 32-bit EL3, then
|
|
|
|
* there is no secure EL1, so we route exceptions to EL3. Otherwise,
|
|
|
|
* exceptions can only be routed to ELs above 1, so we target the higher of
|
|
|
|
* 1 or the current EL.
|
|
|
|
*/
|
2018-02-15 16:15:49 +00:00
|
|
|
return (s->mmu_idx == ARMMMUIdx_S1SE0 && s->secure_routed_to_el3)
|
2018-02-13 03:08:30 +00:00
|
|
|
? 3 : MAX(1, s->current_el);
|
|
|
|
}
|
|
|
|
|
2018-03-02 05:36:06 +00:00
|
|
|
/* Record the exception-syndrome (ISS) word for the current instruction by
 * patching it into the already-emitted insn_start TCG op (parameter slot 2).
 *
 * Fix: declared "static inline" rather than plain "static" -- this function
 * is defined in a header, and a non-inline static definition triggers
 * -Wunused-function in every translation unit that includes the header
 * without calling it.
 */
static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;

    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start_idx to catch multiple updates. */
    assert(s->insn_start_idx != 0);
    tcg_set_insn_param(tcg_ctx, s->insn_start_idx, 2, syn);
    s->insn_start_idx = 0;
}
|
|
|
|
|
2015-08-21 07:04:50 +00:00
|
|
|
/* target-specific extra values for is_jmp */
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI 4
#define DISAS_SWI 5
/* For instructions which unconditionally cause an exception we can skip
 * emitting unreachable code at the end of the TB in the A64 decoder
 */
#define DISAS_EXC 6
/* WFE */
#define DISAS_WFE 7
#define DISAS_HVC 8
#define DISAS_SMC 9
#define DISAS_YIELD 10
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Backports commit 3bb8a96f5348913ee130169504f3642f501b113e from qemu
2018-03-02 19:56:27 +00:00
|
|
|
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET 11
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
#ifdef TARGET_AARCH64
/* Initialize AArch64 translation state (TCG globals etc.). */
void a64_translate_init(struct uc_struct *uc);
/* Translate one TB of AArch64 code for "cpu". */
void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb);
/* Emit code setting the A64 PC to the immediate "val". */
void gen_a64_set_pc_im(DisasContext *s, uint64_t val);
|
|
|
|
#else
|
|
|
|
/* No-op stub: AArch64 translation is compiled out for this target. */
static inline void a64_translate_init(struct uc_struct *uc)
{
}
|
|
|
|
|
2018-02-16 14:59:58 +00:00
|
|
|
/* No-op stub: AArch64 translation is compiled out for this target. */
static inline void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
{
}
|
|
|
|
|
|
|
|
static inline void gen_a64_set_pc_im(uint64_t val)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-02-11 04:42:08 +00:00
|
|
|
/* Evaluate ARM condition code "cc" into *cmp; release with arm_free_cc(). */
void arm_test_cc(TCGContext *tcg_ctx, DisasCompare *cmp, int cc);
/* Free any non-global temporaries held by *cmp (see DisasCompare.value_global). */
void arm_free_cc(TCGContext *tcg_ctx, DisasCompare *cmp);
/* Emit a conditional branch on *cmp to "label" -- exact branch sense defined in translate.c. */
void arm_jump_cc(TCGContext *tcg_ctx, DisasCompare *cmp, TCGLabel *label);
/* Emit a branch to "label" taken when condition code "cc" holds -- TODO confirm sense in translate.c. */
void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, TCGLabel *label);
|
2015-08-21 07:04:50 +00:00
|
|
|
|
|
|
|
#endif /* TARGET_ARM_TRANSLATE_H */
|