target/arm: Synchronize with qemu

Synchronizes bits and pieces that were missed due to an incorrect merge
(sorry :<)
Lioncash 2019-04-18 03:36:50 -04:00
parent 753b6601c5
commit 3521e72580
10 changed files with 872 additions and 872 deletions
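
The dominant change in this sync is converting positional aggregate initializers to C99 designated initializers, visible in each old/new pair in the diffs below. A minimal sketch of why the designated form is preferred, using an illustrative RegInfo stand-in rather than the real ARMCPRegInfo:

```c
/* Illustrative stand-in for ARMCPRegInfo; field names mirror the diff,
 * but the types and values here are simplified. */
#include <stdio.h>

typedef struct {
    const char *name;
    int cp, crn, crm, opc1, opc2;
    int access;   /* stand-in for PL1_RW and friends */
    int type;     /* stand-in for ARM_CP_CONST and friends */
} RegInfo;

/* Positional form: correctness depends on the exact declaration order,
 * so adding or reordering a struct member silently shifts every table. */
static const RegInfo positional = { "L2ECTLR", 15, 9, 0, 1, 3, 1, 2 };

/* Designated form: fields are named, order is irrelevant, and omitted
 * members are zero-initialized -- which is exactly what the new
 * REGINFO_SENTINEL { .type = ARM_CP_SENTINEL } relies on. */
static const RegInfo designated = {
    .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0,
    .opc1 = 1, .opc2 = 3, .access = 1, .type = 2,
};

int main(void)
{
    printf("%s: opc1=%d opc2=%d\n", positional.name, positional.opc1, positional.opc2);
    printf("%s: opc1=%d opc2=%d\n", designated.name, designated.opc1, designated.opc2);
    return 0;
}
```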

View file

@ -7,7 +7,7 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*f
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@ -156,8 +156,10 @@ static void arm_cpu_reset(CPUState *s)
acc->parent_reset(s);
memset(env, 0, offsetof(CPUARMState, end_reset_fields));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
@ -239,7 +241,6 @@ static void arm_cpu_reset(CPUState *s)
} else {
env->uncached_cpsr = ARM_CPU_MODE_SVC;
}
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
if (arm_feature(env, ARM_FEATURE_M)) {
@ -795,7 +796,6 @@ static int arm_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **err
if (!cpu->has_pmu) {
unset_feature(env, ARM_FEATURE_PMU);
}
if (arm_feature(env, ARM_FEATURE_PMU)) {
pmu_init(cpu);
@ -874,6 +874,8 @@ static int arm_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **err
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);
init_cpreg_list(cpu);
#ifndef CONFIG_USER_ONLY
if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
cs->num_ases = 2;
@ -894,8 +896,6 @@ static int arm_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **err
}
#endif
init_cpreg_list(cpu);
qemu_init_vcpu(cs);
cpu_reset(cs);
@ -1245,9 +1245,12 @@ static void arm_v7m_class_init(struct uc_struct *uc, ObjectClass *oc, void *data
static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
/* Dummy the TCM region regs for the moment */
{ "ATCM", 15,9,1, 0,0,0, 0,ARM_CP_CONST, PL1_RW },
{ "BTCM", 15,9,1, 0,0,1, 0,ARM_CP_CONST, PL1_RW },
{ "DCACHE_INVAL", 15,15,5, 0,0,0, 0, ARM_CP_NOP, PL1_W },
{ .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST },
{ .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST },
{ .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
.opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
REGINFO_SENTINEL
};
@ -1275,6 +1278,7 @@ static void cortex_r5_initfn(struct uc_struct *uc, Object *obj, void *opaque)
cpu->isar.id_isar5 = 0x0;
cpu->isar.id_isar6 = 0x0;
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}
@ -1287,10 +1291,10 @@ static void cortex_r5f_initfn(struct uc_struct *uc, Object *obj, void *opaque)
}
static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
{ "L2LOCKDOWN", 15,9,0, 0,1,0, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0, },
{ "L2AUXCR", 15,9,0, 0,1,2, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0, },
{ .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
REGINFO_SENTINEL
};
@ -1337,25 +1341,28 @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
/* power_control should be set to maximum latency. Again,
* default to 0 and set by private hook
*/
{ "A9_PWRCTL", 15,15,0, 0,0,0, 0,
0, PL1_RW, 0, NULL, 0, offsetof(CPUARMState, cp15.c15_power_control) },
{ "A9_DIAG", 15,15,0, 0,0,1, 0,
0, PL1_RW, 0, NULL, 0, offsetof(CPUARMState, cp15.c15_diagnostic) },
{ "A9_PWRDIAG",15,15,0, 0,0,2, 0,
0, PL1_RW, 0, NULL, 0, offsetof(CPUARMState, cp15.c15_power_diagnostic) },
{ "NEONBUSY", 15,15,1, 0,0,0, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0, },
{ .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
{ .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
{ .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
{ .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
/* TLB lockdown control */
{ "TLB_LOCKR", 15,15,4, 0,5,2, 0,
ARM_CP_NOP, PL1_W, 0, NULL, 0 },
{ "TLB_LOCKW", 15,15,4, 0,5,4, 0,
ARM_CP_NOP, PL1_W, 0, NULL, 0, },
{ "TLB_VA", 15,15,5, 0,5,2, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0, },
{ "TLB_PA", 15,15,6, 0,5,2, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0 },
{ "TLB_ATTR", 15,15,7, 0,5,2, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0, },
{ .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
.access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
{ .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
.access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
{ .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
{ .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
{ .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
.access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
REGINFO_SENTINEL
};
@ -1413,12 +1420,12 @@ static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
{ "L2CTLR", 15,9,0, 0,1,2, 0,
0, PL1_RW, 0, NULL, 0, 0, {0, 0},
NULL, a15_l2ctlr_read, arm_cp_write_ignore, },
{ .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
.access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
.writefn = arm_cp_write_ignore, },
#endif
{ "L2ECTLR", 15,9,0, 0,1,3, 0,
ARM_CP_CONST, PL1_RW, 0, NULL, 0 },
{ .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
REGINFO_SENTINEL
};
@ -1742,51 +1749,55 @@ typedef struct ARMCPUInfo {
static const ARMCPUInfo arm_cpus[] = {
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
{ "arm926", arm926_initfn },
{ "arm946", arm946_initfn },
{ "arm1026", arm1026_initfn },
{ .name = "arm926", .initfn = arm926_initfn },
{ .name = "arm946", .initfn = arm946_initfn },
{ .name = "arm1026", .initfn = arm1026_initfn },
/* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
* older core than plain "arm1136". In particular this does not
* have the v6K features.
*/
{ "arm1136-r2", arm1136_r2_initfn },
{ "arm1136", arm1136_initfn },
{ "arm1176", arm1176_initfn },
{ "arm11mpcore", arm11mpcore_initfn },
{ "cortex-m0", cortex_m0_initfn, arm_v7m_class_init },
{ "cortex-m3", cortex_m3_initfn, arm_v7m_class_init },
{ "cortex-m4", cortex_m4_initfn, arm_v7m_class_init },
{ "cortex-m33", cortex_m33_initfn, arm_v7m_class_init },
{ "cortex-r5", cortex_r5_initfn },
{ "cortex-r5f", cortex_r5f_initfn },
{ "cortex-a7", cortex_a7_initfn },
{ "cortex-a8", cortex_a8_initfn },
{ "cortex-a9", cortex_a9_initfn },
{ "cortex-a15", cortex_a15_initfn },
{ "ti925t", ti925t_initfn },
{ "sa1100", sa1100_initfn },
{ "sa1110", sa1110_initfn },
{ "pxa250", pxa250_initfn },
{ "pxa255", pxa255_initfn },
{ "pxa260", pxa260_initfn },
{ "pxa261", pxa261_initfn },
{ "pxa262", pxa262_initfn },
{ .name = "arm1136-r2", .initfn = arm1136_r2_initfn },
{ .name = "arm1136", .initfn = arm1136_initfn },
{ .name = "arm1176", .initfn = arm1176_initfn },
{ .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
{ .name = "cortex-m0", .initfn = cortex_m0_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-m3", .initfn = cortex_m3_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-m4", .initfn = cortex_m4_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-m33", .initfn = cortex_m33_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-r5", .initfn = cortex_r5_initfn },
{ .name = "cortex-r5f", .initfn = cortex_r5f_initfn },
{ .name = "cortex-a7", .initfn = cortex_a7_initfn },
{ .name = "cortex-a8", .initfn = cortex_a8_initfn },
{ .name = "cortex-a9", .initfn = cortex_a9_initfn },
{ .name = "cortex-a15", .initfn = cortex_a15_initfn },
{ .name = "ti925t", .initfn = ti925t_initfn },
{ .name = "sa1100", .initfn = sa1100_initfn },
{ .name = "sa1110", .initfn = sa1110_initfn },
{ .name = "pxa250", .initfn = pxa250_initfn },
{ .name = "pxa255", .initfn = pxa255_initfn },
{ .name = "pxa260", .initfn = pxa260_initfn },
{ .name = "pxa261", .initfn = pxa261_initfn },
{ .name = "pxa262", .initfn = pxa262_initfn },
/* "pxa270" is an alias for "pxa270-a0" */
{ "pxa270", pxa270a0_initfn },
{ "pxa270-a0", pxa270a0_initfn },
{ "pxa270-a1", pxa270a1_initfn },
{ "pxa270-b0", pxa270b0_initfn },
{ "pxa270-b1", pxa270b1_initfn },
{ "pxa270-c0", pxa270c0_initfn },
{ "pxa270-c5", pxa270c5_initfn },
{ .name = "pxa270", .initfn = pxa270a0_initfn },
{ .name = "pxa270-a0", .initfn = pxa270a0_initfn },
{ .name = "pxa270-a1", .initfn = pxa270a1_initfn },
{ .name = "pxa270-b0", .initfn = pxa270b0_initfn },
{ .name = "pxa270-b1", .initfn = pxa270b1_initfn },
{ .name = "pxa270-c0", .initfn = pxa270c0_initfn },
{ .name = "pxa270-c5", .initfn = pxa270c5_initfn },
#ifndef TARGET_AARCH64
{ "max", arm_max_initfn },
{ .name = "max", .initfn = arm_max_initfn },
#endif
#ifdef CONFIG_USER_ONLY
{ "any", arm_max_initfn },
{ .name = "any", .initfn = arm_max_initfn },
#endif
#endif
{ NULL }
{ .name = NULL }
};
#ifdef CONFIG_USER_ONLY
@ -1818,7 +1829,6 @@ static void arm_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data
acc->parent_reset = cc->reset;
cc->reset = arm_cpu_reset;
cc->class_by_name = arm_cpu_class_by_name;
cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
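
Besides dropping the duplicated init_cpreg_list() and class_by_name assignments, the arm_cpu_reset() hunk above adds a second g_hash_table_foreach() pass (cp_reg_check_reset) after the reset pass. A hedged sketch of that reset-then-verify pattern; the GLib calls are real, but the register type and values are stand-ins:

```c
#include <glib.h>

/* Stand-in for a coprocessor register; the real code reads the register
 * back through its raw accessors rather than a plain field. */
typedef struct {
    guint64 value;
    guint64 resetvalue;
} CPReg;

static void cp_reg_reset(gpointer key, gpointer val, gpointer opaque)
{
    CPReg *r = val;
    r->value = r->resetvalue;
}

static void cp_reg_check_reset(gpointer key, gpointer val, gpointer opaque)
{
    const CPReg *r = val;
    /* After the reset pass, every register must hold its reset value. */
    g_assert(r->value == r->resetvalue);
}

int main(void)
{
    GHashTable *regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                             NULL, g_free);
    CPReg *r = g_new0(CPReg, 1);
    r->resetvalue = 0x410fc075;   /* illustrative MIDR-like value */
    g_hash_table_insert(regs, GUINT_TO_POINTER(1), r);

    g_hash_table_foreach(regs, cp_reg_reset, NULL);
    g_hash_table_foreach(regs, cp_reg_check_reset, NULL);

    g_hash_table_unref(regs);
    return 0;
}
```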

View file

@ -32,8 +32,6 @@
# define TARGET_LONG_BITS 32
#endif
#define TARGET_IS_BIENDIAN 1
/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)
@ -943,6 +941,7 @@ int cpu_arm_signal_handler(int host_signum, void *pinfo,
/**
* pmu_op_start/finish
* @env: CPUARMState
*
* Convert all PMU counters between their delta form (the typical mode when
* they are enabled) and the guest-visible values. These two calls must
* surround any action which might affect the counters.
@ -2403,7 +2402,7 @@ struct ARMCPRegInfo {
#define CPREG_FIELD64(env, ri) \
(*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
#define REGINFO_SENTINEL { NULL, 0,0,0,0,0,0, 0, ARM_CP_SENTINEL, 0, 0, NULL, 0,0, {0, 0}, 0,0,0,0,0,0, }
#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
const ARMCPRegInfo *regs, void *opaque);
@ -2523,54 +2522,6 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
/**
* arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
* Depending on the values of HCR_EL2.E2H and TGE, this may be
* "behaves as 1 for all purposes other than direct read/write" or
* "behaves as 0 for all purposes other than direct read/write"
*/
static inline bool arm_hcr_el2_imo(CPUARMState *env)
{
switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
case HCR_TGE:
return true;
case HCR_TGE | HCR_E2H:
return false;
default:
return env->cp15.hcr_el2 & HCR_IMO;
}
}
/**
* arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
*/
static inline bool arm_hcr_el2_fmo(CPUARMState *env)
{
switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
case HCR_TGE:
return true;
case HCR_TGE | HCR_E2H:
return false;
default:
return env->cp15.hcr_el2 & HCR_FMO;
}
}
/**
* arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
*/
static inline bool arm_hcr_el2_amo(CPUARMState *env)
{
switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
case HCR_TGE:
return true;
case HCR_TGE | HCR_E2H:
return false;
default:
return env->cp15.hcr_el2 & HCR_AMO;
}
}
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
@ -2579,6 +2530,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
bool secure = arm_is_secure(env);
bool pstate_unmasked;
int8_t unmasked = 0;
uint64_t hcr_el2;
/* Don't take exceptions if they target a lower EL.
* This check should catch any exceptions that would not be taken but left
@ -2588,6 +2540,8 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
hcr_el2 = arm_hcr_el2_eff(env);
switch (excp_idx) {
case EXCP_FIQ:
pstate_unmasked = !(env->daif & PSTATE_F);
@ -2598,13 +2552,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized and non-secure. */
return false;
}
@ -2644,7 +2598,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
hcr = arm_hcr_el2_fmo(env);
hcr = hcr_el2 & HCR_FMO;
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
@ -2661,7 +2615,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* when setting the target EL, so it does not have a further
* affect here.
*/
hcr = arm_hcr_el2_imo(env);
hcr = hcr_el2 & HCR_IMO;
scr = false;
break;
default:
@ -2988,9 +2942,6 @@ static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
* since the pseudocode has it at all callsites except for the one in
* CheckSoftwareStep(), where it is elided because both branches would
* always return the same value.
*
* Parts of the pseudocode relating to EL2 and EL3 are omitted because we
* don't yet implement those exception levels or their associated trap bits.
*/
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
@ -3011,6 +2962,59 @@ static inline bool arm_singlestep_active(CPUARMState *env)
&& arm_generate_debug_exceptions(env);
}
static inline bool arm_sctlr_b(CPUARMState *env)
{
return
/* We need not implement SCTLR.ITD in user-mode emulation, so
* let linux-user ignore the fact that it conflicts with SCTLR_B.
* This lets people run BE32 binaries with "-cpu any".
*/
#ifndef CONFIG_USER_ONLY
!arm_feature(env, ARM_FEATURE_V7) &&
#endif
(env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}
static inline uint64_t arm_sctlr(CPUARMState *env, int el)
{
if (el == 0) {
/* FIXME: ARMv8.1-VHE S2 translation regime. */
return env->cp15.sctlr_el[1];
} else {
return env->cp15.sctlr_el[el];
}
}
/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
/* In 32bit endianness is determined by looking at CPSR's E bit */
if (!is_a64(env)) {
return
#ifdef CONFIG_USER_ONLY
/* In system mode, BE32 is modelled in line with the
* architecture (as word-invariant big-endianness), where loads
* and stores are done little endian but from addresses which
* are adjusted by XORing with the appropriate constant. So the
* endianness to use for the raw data access is not affected by
* SCTLR.B.
* In user mode, however, we model BE32 as byte-invariant
* big-endianness (because user-only code cannot tell the
* difference), and so we need to use a data access endianness
* that depends on SCTLR.B.
*/
arm_sctlr_b(env) ||
#endif
((env->uncached_cpsr & CPSR_E) ? 1 : 0);
} else {
int cur_el = arm_current_el(env);
uint64_t sctlr = arm_sctlr(env, cur_el);
return (sctlr & (cur_el ? SCTLR_EE : SCTLR_E0E)) != 0;
}
}
#include "exec/cpu-all.h"
/* Bit usage in the TB flags field: bit 31 indicates whether we are
@ -3076,57 +3080,16 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
static inline bool arm_sctlr_b(CPUARMState *env)
#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
return
/* We need not implement SCTLR.ITD in user-mode emulation, so
* let linux-user ignore the fact that it conflicts with SCTLR_B.
* This lets people run BE32 binaries with "-cpu any".
*/
#ifndef CONFIG_USER_ONLY
!arm_feature(env, ARM_FEATURE_V7) &&
#ifdef TARGET_WORDS_BIGENDIAN
1 ^
#endif
(env->cp15.sctlr_el[1] & SCTLR_B) != 0;
arm_cpu_data_is_big_endian(env);
}
static inline uint64_t arm_sctlr(CPUARMState *env, int el)
{
if (el == 0) {
/* FIXME: ARMv8.1-VHE S2 translation regime. */
return env->cp15.sctlr_el[1];
} else {
return env->cp15.sctlr_el[el];
}
}
/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
/* In 32bit endianness is determined by looking at CPSR's E bit */
if (!is_a64(env)) {
return
#ifdef CONFIG_USER_ONLY
/* In system mode, BE32 is modelled in line with the
* architecture (as word-invariant big-endianness), where loads
* and stores are done little endian but from addresses which
* are adjusted by XORing with the appropriate constant. So the
* endianness to use for the raw data access is not affected by
* SCTLR.B.
* In user mode, however, we model BE32 as byte-invariant
* big-endianness (because user-only code cannot tell the
* difference), and so we need to use a data access endianness
* that depends on SCTLR.B.
*/
arm_sctlr_b(env) ||
#endif
((env->uncached_cpsr & CPSR_E) ? 1 : 0);
} else {
int cur_el = arm_current_el(env);
uint64_t sctlr = arm_sctlr(env, cur_el);
return (sctlr & (cur_el ? SCTLR_EE : SCTLR_E0E)) != 0;
}
}
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags);
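
The cpu.h hunks above delete the per-bit arm_hcr_el2_imo/fmo/amo() helpers and have arm_excp_unmasked() test bits on a single arm_hcr_el2_eff() value instead. A simplified sketch of that effective-value idea, reconstructed from the removed helpers (QEMU's real arm_hcr_el2_eff() also gates bits on implemented features, so treat this as illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* HCR_EL2 bit positions as in the ARM ARM. */
#define HCR_FMO (1ULL << 3)
#define HCR_IMO (1ULL << 4)
#define HCR_AMO (1ULL << 5)
#define HCR_TGE (1ULL << 27)
#define HCR_E2H (1ULL << 34)

static uint64_t hcr_el2_eff(uint64_t hcr)
{
    switch (hcr & (HCR_TGE | HCR_E2H)) {
    case HCR_TGE:
        /* The routing bits "behave as 1 for all purposes other than
         * direct read/write", per the removed helpers. */
        return hcr | HCR_IMO | HCR_FMO | HCR_AMO;
    case HCR_TGE | HCR_E2H:
        /* ...and behave as 0 when E2H is also set. */
        return hcr & ~(HCR_IMO | HCR_FMO | HCR_AMO);
    default:
        return hcr;
    }
}

int main(void)
{
    uint64_t eff = hcr_el2_eff(HCR_TGE);   /* TGE set, E2H clear */
    printf("effective IMO=%d FMO=%d\n",
           (eff & HCR_IMO) != 0, (eff & HCR_FMO) != 0);
    return 0;
}
```

Callers then write `hcr_el2 & HCR_FMO` once per check instead of calling three helpers, which is the shape the arm_excp_unmasked() hunks take.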

View file

@ -342,11 +342,11 @@ typedef struct ARMCPUInfo {
} ARMCPUInfo;
static const ARMCPUInfo aarch64_cpus[] = {
{ "cortex-a57", aarch64_a57_initfn },
{ "cortex-a53", aarch64_a53_initfn },
{ "cortex-a72", aarch64_a72_initfn },
{ "max", aarch64_max_initfn },
{ NULL }
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
{ .name = "max", .initfn = aarch64_max_initfn },
{ .name = NULL }
};
static QEMU_UNUSED_FUNC bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)

View file

@ -614,7 +614,6 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
/* Returns 0 on success; 1 otherwise. */
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi)
{
@ -723,7 +722,6 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs] = int128_getlo(oldv);
@ -745,7 +743,6 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs + 1] = int128_getlo(oldv);
@ -1113,3 +1110,5 @@ uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
return float16_sqrt(a, s);
}

View file

@ -1230,12 +1230,10 @@ static void pmccntr_op_finish(CPUARMState *env)
{
if (pmu_counter_enabled(env, 31)) {
uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
prev_cycles /= 64;
}
env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
}
}
@ -1472,7 +1470,6 @@ static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
pmevcntr_op_finish(env, counter);
}
/* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
* PMSELR value is equal to or greater than the number of implemented
* counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
@ -2652,6 +2649,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
};
#else
/* In user-mode most of the generic timer registers are inaccessible
* however modern kernels (4.12+) allow access to cntvct_el0
*/
@ -2926,15 +2924,16 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
{ "PAR", 15,7,4, 0,0,0, 0,
0, PL1_RW, 0, NULL, 0, 0,
{ offsetoflow32(CPUARMState, cp15.par_s), offsetoflow32(CPUARMState, cp15.par_ns) },
NULL, NULL, par_write },
{ .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .resetvalue = 0,
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write },
#ifndef CONFIG_USER_ONLY
/* This underdecoding is safe because the reginfo is NO_RAW. */
{ "ATS", 15,7,8, 0,0,CP_ANY, 0,
ARM_CP_NO_RAW, PL1_W, 0, NULL, 0, 0, {0, 0},
ats_access, NULL, ats_write },
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
.writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
REGINFO_SENTINEL
};
@ -3565,7 +3564,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
*/
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
uint64_t value)
{
// UNICORN: TODO: issue #642
#if 0
@ -6008,9 +6007,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64ZFR0_EL1" },
{ .name = "ID_AA64MMFR0_EL1",
.fixed_bits = 0x00000000ff000000 },
{ .name = "ID_AA64MMFR1_EL1" },
{ .name = "ID_AA64MMFR*_EL1_RESERVED",
.is_glob = true },
{ .name = "ID_AA64MMFR1_EL1" },
{ .name = "ID_AA64DFR0_EL1",
.fixed_bits = 0x0000000000000006 },
{ .name = "ID_AA64DFR1_EL1" },
@ -7101,11 +7100,11 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
arm_feature(env, ARM_FEATURE_V8)) {
mask |= CPSR_IL;
val |= CPSR_IL;
}
qemu_log_mask(LOG_GUEST_ERROR,
"Illegal AArch32 mode switch attempt from %s to %s\n",
aarch32_mode_name(env->uncached_cpsr),
aarch32_mode_name(val));
}
} else {
qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
write_type == CPSRWriteExceptionReturn ?
@ -7937,12 +7936,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* we might now want to take a different exception which
* targets a different security state, so try again from the top.
*/
qemu_log_mask(CPU_LOG_INT,
"...derived exception on callee-saves register stacking");
v7m_exception_taken(cpu, lr, true, true);
return;
}
if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
/* Vector load failed: derived exception */
qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
v7m_exception_taken(cpu, lr, true, true);
return;
}
@ -8042,12 +8044,19 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
bool exc_secure = false;
bool return_to_secure;
/* We can only get here from an EXCP_EXCEPTION_EXIT, and
* gen_bx_excret() enforces the architectural rule
* that jumps to magic addresses don't have magic behaviour unless
* we're in Handler mode (compare pseudocode BXWritePC()).
/* If we're not in Handler mode then jumps to magic exception-exit
* addresses don't have magic behaviour. However for the v8M
* security extensions the magic secure-function-return has to
* work in thread mode too, so to avoid doing an extra check in
* the generated code we allow exception-exit magic to also cause the
* internal exception and bring us here in thread mode. Correct code
* will never try to do this (the following insn fetch will always
* fault) so we the overhead of having taken an unnecessary exception
* doesn't matter.
*/
assert(arm_v7m_is_handler_mode(env));
if (!arm_v7m_is_handler_mode(env)) {
return;
}
/* In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
@ -8095,7 +8104,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
// Unicorn: commented out
//if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
// env->v7m.faultmask[es] = 0;
// env->v7m.faultmask[exc_secure] = 0;
//}
} else {
env->v7m.faultmask[M_REG_NS] = 0;
@ -8104,7 +8113,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
// Unicorn: if'd out
#if 0
switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
exc_secure)) {
case -1:
/* attempt to exit an exception that isn't active */
ufault = true;
@ -8178,9 +8188,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
// Unicorn: commented out
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed EXC_RETURN.ES validity check\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@ -8191,9 +8201,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
// Unicorn: commented out
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@ -8260,10 +8270,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
// Unicorn: commented out
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed exception return integrity "
"signature check\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@ -8295,6 +8305,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* v7m_stack_read() pended a fault, so take it (as a tail
* chained exception on the same stack frame)
*/
qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@ -8332,10 +8343,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
// env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity "
"check\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
}
@ -8372,9 +8383,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
ignore_stackfaults = v7m_push_stack(cpu);
v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
return;
}
@ -8461,26 +8472,24 @@ static void arm_log_exception(int idx)
if (qemu_loglevel_mask(CPU_LOG_INT)) {
const char *exc = NULL;
static const char * const excnames[] = {
NULL,
"Undefined Instruction",
"SVC",
"Prefetch Abort",
"Data Abort",
"IRQ",
"FIQ",
"Breakpoint",
"QEMU v7M exception exit",
"QEMU intercept of kernel commpage",
NULL,
"Hypervisor Call",
"Hypervisor Trap",
"Secure Monitor Call",
"Virtual IRQ",
"Virtual FIQ",
"Semihosting call",
"v7M NOCP UsageFault",
"v7M INVSTATE UsageFault",
"v8M STKOF UsageFault",
[EXCP_UDEF] = "Undefined Instruction",
[EXCP_SWI] = "SVC",
[EXCP_PREFETCH_ABORT] = "Prefetch Abort",
[EXCP_DATA_ABORT] = "Data Abort",
[EXCP_IRQ] = "IRQ",
[EXCP_FIQ] = "FIQ",
[EXCP_BKPT] = "Breakpoint",
[EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
[EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
[EXCP_HVC] = "Hypervisor Call",
[EXCP_HYP_TRAP] = "Hypervisor Trap",
[EXCP_SMC] = "Secure Monitor Call",
[EXCP_VIRQ] = "Virtual IRQ",
[EXCP_VFIQ] = "Virtual FIQ",
[EXCP_SEMIHOST] = "Semihosting call",
[EXCP_NOCP] = "v7M NOCP UsageFault",
[EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
[EXCP_STKOF] = "v8M STKOF UsageFault",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@ -8629,15 +8638,15 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
handle it. */
switch (cs->exception_index) {
case EXCP_UDEF:
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
break;
case EXCP_NOCP:
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
break;
case EXCP_INVSTATE:
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
break;
case EXCP_STKOF:
@ -8646,7 +8655,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_SWI:
/* The PC already points to the next instruction. */
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
@ -8710,7 +8719,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
}
// Unicorn: commented out
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
default:
/* All other FSR values are either MPU faults or "can't happen
@ -8731,7 +8740,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
}
// Unicorn: commented out
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
// env->v7m.secure);
break;
}
break;
@ -8750,7 +8760,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
}
}
#endif
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
//armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
break;
case EXCP_IRQ:
break;
@ -8806,7 +8816,6 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
ignore_stackfaults = v7m_push_stack(cpu);
v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
@ -10580,7 +10589,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
param = aa64_va_parameters(env, address, mmu_idx,
access_type != MMU_INST_FETCH);
level = 0;
/* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
* invalid.
*/
@ -12223,7 +12231,6 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
* 1K as an artefact of legacy v5 subpage support being present in the
* same QEMU executable.
*/
int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
// msvc doesnt allow non-constant array sizes, so we work out the size it would be
// TARGET_PAGE_SIZE is 1024
@ -12694,7 +12701,7 @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *pflags)
{
ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
int current_el = arm_current_el(env);
int fp_el = fp_exception_el(env, current_el);
uint32_t flags = 0;
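
The excnames[] rewrite above replaces a positional string table (padded with NULL placeholders) with designated array initializers keyed by EXCP_* value; unnamed indices default to NULL, so holes in the numbering need no placeholder. translate.c's neon_3r_sizes[]/neon_2rm_sizes[] tables below get the same treatment. A compact sketch, with illustrative enum values:

```c
#include <stdio.h>

/* Illustrative exception numbers, with a deliberate gap at 3 and 4. */
enum {
    EXCP_UDEF  = 1,
    EXCP_SWI   = 2,
    EXCP_IRQ   = 5,
    EXCP_STKOF = 19,
    EXCP_NB                     /* table size */
};

static const char * const excnames[EXCP_NB] = {
    [EXCP_UDEF]  = "Undefined Instruction",
    [EXCP_SWI]   = "SVC",
    [EXCP_IRQ]   = "IRQ",
    [EXCP_STKOF] = "v8M STKOF UsageFault",
};

int main(void)
{
    for (int i = 0; i < EXCP_NB; i++) {
        printf("%2d: %s\n", i, excnames[i] ? excnames[i] : "(none)");
    }
    return 0;
}
```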

View file

@ -92,15 +92,15 @@
# Two operand with unused vector element size
@pd_pn_e0 ........ ........ ....... rn:4 . rd:4 &rr_esz esz=0
# Two operand
@pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz
@rd_rn ........ esz:2 ...... ...... rn:5 rd:5 &rr_esz
# Two operand with governing predicate, flags setting
@pd_pg_pn_s ........ . s:1 ...... .. pg:4 . rn:4 . rd:4 &rpr_s
@pd_pg_pn_s0 ........ . . ...... .. pg:4 . rn:4 . rd:4 &rpr_s s=0
# Two operand
@pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz
# Three operand with unused vector element size
@rd_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 &rrr_esz esz=0
@ -130,14 +130,6 @@
@rd_pg4_rn_rm ........ esz:2 . rm:5 .. pg:4 rn:5 rd:5 &rprr_esz
@pd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 . rd:4 &rprr_esz
# One register operand, with governing predicate, vector element size
@rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
@rd_pg4_pn ........ esz:2 ... ... .. pg:4 . rn:4 rd:5 &rpr_esz
@pd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 . rd:4 &rpr_esz
# One register operand, with governing predicate, no vector element size
@rd_pg_rn_e0 ........ .. ... ... ... pg:3 rn:5 rd:5 &rpr_esz esz=0
# Three register operand, with governing predicate, vector element size
@rda_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 \
&rprrr_esz ra=%reg_movprfx
@ -146,6 +138,14 @@
@rdn_pg_rm_ra ........ esz:2 . ra:5 ... pg:3 rm:5 rd:5 \
&rprrr_esz rn=%reg_movprfx
# One register operand, with governing predicate, vector element size
@rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
@rd_pg4_pn ........ esz:2 ... ... .. pg:4 . rn:4 rd:5 &rpr_esz
@pd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 . rd:4 &rpr_esz
# One register operand, with governing predicate, no vector element size
@rd_pg_rn_e0 ........ .. ... ... ... pg:3 rn:5 rd:5 &rpr_esz esz=0
# Two register operands with a 6-bit signed immediate.
@rd_rn_i6 ........ ... rn:5 ..... imm:s6 rd:5 &rri
@ -346,16 +346,6 @@ FCMUO_ppzz 01100101 .. 0 ..... 110 ... ..... 0 .... @pd_pg_rn_rm
FACGE_ppzz 01100101 .. 0 ..... 110 ... ..... 1 .... @pd_pg_rn_rm
FACGT_ppzz 01100101 .. 0 ..... 111 ... ..... 1 .... @pd_pg_rn_rm
# SVE floating-point arithmetic with immediate (predicated)
FADD_zpzi 01100101 .. 011 000 100 ... 0000 . ..... @rdn_i1
FSUB_zpzi 01100101 .. 011 001 100 ... 0000 . ..... @rdn_i1
FMUL_zpzi 01100101 .. 011 010 100 ... 0000 . ..... @rdn_i1
FSUBR_zpzi 01100101 .. 011 011 100 ... 0000 . ..... @rdn_i1
FMAXNM_zpzi 01100101 .. 011 100 100 ... 0000 . ..... @rdn_i1
FMINNM_zpzi 01100101 .. 011 101 100 ... 0000 . ..... @rdn_i1
FMAX_zpzi 01100101 .. 011 110 100 ... 0000 . ..... @rdn_i1
FMIN_zpzi 01100101 .. 011 111 100 ... 0000 . ..... @rdn_i1
### SVE Integer Multiply-Add Group
# SVE integer multiply-add writing addend (predicated)
@ -831,6 +821,16 @@ FMULX 01100101 .. 00 1010 100 ... ..... ..... @rdn_pg_rm
FDIV 01100101 .. 00 1100 100 ... ..... ..... @rdm_pg_rn # FDIVR
FDIV 01100101 .. 00 1101 100 ... ..... ..... @rdn_pg_rm
# SVE floating-point arithmetic with immediate (predicated)
FADD_zpzi 01100101 .. 011 000 100 ... 0000 . ..... @rdn_i1
FSUB_zpzi 01100101 .. 011 001 100 ... 0000 . ..... @rdn_i1
FMUL_zpzi 01100101 .. 011 010 100 ... 0000 . ..... @rdn_i1
FSUBR_zpzi 01100101 .. 011 011 100 ... 0000 . ..... @rdn_i1
FMAXNM_zpzi 01100101 .. 011 100 100 ... 0000 . ..... @rdn_i1
FMINNM_zpzi 01100101 .. 011 101 100 ... 0000 . ..... @rdn_i1
FMAX_zpzi 01100101 .. 011 110 100 ... 0000 . ..... @rdn_i1
FMIN_zpzi 01100101 .. 011 111 100 ... 0000 . ..... @rdn_i1
# SVE floating-point trig multiply-add coefficient
FTMAD 01100101 esz:2 010 imm:3 100000 rm:5 rd:5 rn=%reg_movprfx
@ -1017,17 +1017,17 @@ PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
### SVE Memory Store Group
# SVE contiguous store (scalar plus immediate)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \
@rpri_store_msz nreg=0
# SVE store predicate register
STR_pri 1110010 11 0. ..... 000 ... ..... 0 .... @pd_rn_i9
# SVE store vector register
STR_zri 1110010 11 0. ..... 010 ... ..... ..... @rd_rn_i9
# SVE contiguous store (scalar plus immediate)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \
@rpri_store_msz nreg=0
# SVE contiguous store (scalar plus scalar)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
# Enumerate msz lest we conflict with STR_zri.

File diff suppressed because it is too large.

View file

@ -36,11 +36,6 @@
#include "translate-a64.h"
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif
static const char *regnames[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
@ -109,12 +104,6 @@ void a64_translate_init(struct uc_struct *uc)
offsetof(CPUARMState, exclusive_val), "exclusive_val");
tcg_ctx->cpu_exclusive_high = tcg_global_mem_new_i64(uc->tcg_ctx, tcg_ctx->cpu_env,
offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i64(uc->tcg_ctx, tcg_ctx->cpu_env,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(uc->tcg_ctx, tcg_ctx->cpu_env,
offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline int get_a64_user_mem_index(DisasContext *s)
@ -176,14 +165,13 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
int el = arm_current_el(env);
const char *ns_status;
cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
env->pc, env->xregs[31]);
for (i = 0; i < 31; i++) {
cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
if ((i % 4) == 3) {
cpu_fprintf(f, "\n");
cpu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
for (i = 0; i < 32; i++) {
if (i == 31) {
cpu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
} else {
cpu_fprintf(f, " ");
cpu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
(i + 2) % 3 ? " " : "\n");
}
}
@ -192,7 +180,6 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
} else {
ns_status = "";
}
cpu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
psr,
psr & PSTATE_N ? 'N' : '-',
@ -202,6 +189,7 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
ns_status,
el,
psr & PSTATE_SP ? 'h' : 't');
if (cpu_isar_feature(aa64_bti, cpu)) {
cpu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
}
@ -10333,7 +10321,6 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
unallocated_encoding(s);
return;
}
tcg_debug_assert(size <= 3);
if (!fp_access_check(s)) {
@ -13198,6 +13185,17 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
is_fp = 2;
break;
case 0x00: /* FMLAL */
case 0x04: /* FMLSL */
case 0x18: /* FMLAL2 */
case 0x1c: /* FMLSL2 */
if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
unallocated_encoding(s);
return;
}
size = MO_16;
/* is_fp, but we pass cpu_env not fp_status. */
break;
default:
unallocated_encoding(s);
return;
@ -13308,6 +13306,22 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
tcg_temp_free_ptr(tcg_ctx, fpst);
}
return;
case 0x00: /* FMLAL */
case 0x04: /* FMLSL */
case 0x18: /* FMLAL2 */
case 0x1c: /* FMLSL2 */
{
int is_s = extract32(opcode, 2, 1);
int is_2 = u;
int data = (index << 2) | (is_2 << 1) | is_s;
tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
vec_full_reg_offset(s, rm), tcg_ctx->cpu_env,
is_q ? 16 : 8, vec_full_reg_size(s),
data, gen_helper_gvec_fmlal_idx_a64);
}
return;
}
if (size == 3) {
@ -14730,11 +14744,11 @@ static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
}
const TranslatorOps aarch64_translator_ops = {
aarch64_tr_init_disas_context,
aarch64_tr_tb_start,
aarch64_tr_insn_start,
aarch64_tr_breakpoint_check,
aarch64_tr_translate_insn,
aarch64_tr_tb_stop,
aarch64_tr_disas_log,
.init_disas_context = aarch64_tr_init_disas_context,
.tb_start = aarch64_tr_tb_start,
.insn_start = aarch64_tr_insn_start,
.breakpoint_check = aarch64_tr_breakpoint_check,
.translate_insn = aarch64_tr_translate_insn,
.tb_stop = aarch64_tr_tb_stop,
.disas_log = aarch64_tr_disas_log,
};

View file

@ -4858,6 +4858,11 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
/* When running in MTTCG we don't generate jumps to the yield and
* WFE helpers as it won't affect the scheduling of other vCPUs.
* If we wanted to more completely model WFE/SEV so we don't busy
* spin unnecessarily we would need to do something more involved.
*/
case 1: /* yield */
if (!s->uc->parallel_cpus) {
gen_set_pc_im(s, s->pc);
@ -5306,6 +5311,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
break;
case 2:
reg_idx = (insn >> 7) & 1;
stride = (insn & (1 << 6)) ? 2 : 1;
break;
default:
abort();
@ -5599,7 +5605,7 @@ static void gen_neon_narrow_op(DisasContext *s, int op, int u, int size,
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
@ -5615,38 +5621,38 @@ static void gen_neon_narrow_op(DisasContext *s, int op, int u, int size,
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
static const uint8_t neon_3r_sizes[] = {
/*NEON_3R_VHADD*/ 0x7,
/*NEON_3R_VQADD*/ 0xf,
/*NEON_3R_VRHADD*/ 0x7,
/*NEON_3R_LOGIC*/ 0xf, /* size field encodes op type */
/*NEON_3R_VHSUB*/ 0x7,
/*NEON_3R_VQSUB*/ 0xf,
/*NEON_3R_VCGT*/ 0x7,
/*NEON_3R_VCGE*/ 0x7,
/*NEON_3R_VSHL*/ 0xf,
/*NEON_3R_VQSHL*/ 0xf,
/*NEON_3R_VRSHL*/ 0xf,
/*NEON_3R_VQRSHL*/ 0xf,
/*NEON_3R_VMAX*/ 0x7,
/*NEON_3R_VMIN*/ 0x7,
/*NEON_3R_VABD*/ 0x7,
/*NEON_3R_VABA*/ 0x7,
/*NEON_3R_VADD_VSUB*/ 0xf,
/*NEON_3R_VTST_VCEQ*/ 0x7,
/*NEON_3R_VML*/ 0x7,
/*NEON_3R_VMUL*/ 0x7,
/*NEON_3R_VPMAX*/ 0x7,
/*NEON_3R_VPMIN*/ 0x7,
/*NEON_3R_VQDMULH_VQRDMULH*/ 0x6,
/*NEON_3R_VPADD_VQRDMLAH*/ 0x7,
/*NEON_3R_SHA*/ 0xf, /* size field encodes op type */
/*NEON_3R_VFM_VQRDMLSH*/ 0x7, /* For VFM, size bit 1 encodes op */
/*NEON_3R_FLOAT_ARITH*/ 0x5, /* size bit 1 encodes op */
/*NEON_3R_FLOAT_MULTIPLY*/ 0x5, /* size bit 1 encodes op */
/*NEON_3R_FLOAT_CMP*/ 0x5, /* size bit 1 encodes op */
/*NEON_3R_FLOAT_ACMP*/ 0x5, /* size bit 1 encodes op */
/*NEON_3R_FLOAT_MINMAX*/ 0x5, /* size bit 1 encodes op */
/*NEON_3R_FLOAT_MISC*/ 0x5, /* size bit 1 encodes op */
[NEON_3R_VHADD] = 0x7,
[NEON_3R_VQADD] = 0xf,
[NEON_3R_VRHADD] = 0x7,
[NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
[NEON_3R_VHSUB] = 0x7,
[NEON_3R_VQSUB] = 0xf,
[NEON_3R_VCGT] = 0x7,
[NEON_3R_VCGE] = 0x7,
[NEON_3R_VSHL] = 0xf,
[NEON_3R_VQSHL] = 0xf,
[NEON_3R_VRSHL] = 0xf,
[NEON_3R_VQRSHL] = 0xf,
[NEON_3R_VMAX] = 0x7,
[NEON_3R_VMIN] = 0x7,
[NEON_3R_VABD] = 0x7,
[NEON_3R_VABA] = 0x7,
[NEON_3R_VADD_VSUB] = 0xf,
[NEON_3R_VTST_VCEQ] = 0x7,
[NEON_3R_VML] = 0x7,
[NEON_3R_VMUL] = 0x7,
[NEON_3R_VPMAX] = 0x7,
[NEON_3R_VPMIN] = 0x7,
[NEON_3R_VQDMULH_VQRDMULH] = 0x6,
[NEON_3R_VPADD_VQRDMLAH] = 0x7,
[NEON_3R_SHA] = 0xf, /* size field encodes op type */
[NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
[NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
@ -5755,70 +5761,68 @@ static bool neon_2rm_is_v8_op(int op)
* op values will have no bits set they always UNDEF.
*/
static const uint8_t neon_2rm_sizes[] = {
/*NEON_2RM_VREV64*/ 0x7,
/*NEON_2RM_VREV32*/ 0x3,
/*NEON_2RM_VREV16*/ 0x1,
0,
/*NEON_2RM_VPADDL*/ 0x7,
/*NEON_2RM_VPADDL_U*/ 0x7,
/*NEON_2RM_AESE*/ 0x1,
/*NEON_2RM_AESMC*/ 0x1,
/*NEON_2RM_VCLS*/ 0x7,
/*NEON_2RM_VCLZ*/ 0x7,
/*NEON_2RM_VCNT*/ 0x1,
/*NEON_2RM_VMVN*/ 0x1,
/*NEON_2RM_VPADAL*/ 0x7,
/*NEON_2RM_VPADAL_U*/ 0x7,
/*NEON_2RM_VQABS*/ 0x7,
/*NEON_2RM_VQNEG*/ 0x7,
/*NEON_2RM_VCGT0*/ 0x7,
/*NEON_2RM_VCGE0*/ 0x7,
/*NEON_2RM_VCEQ0*/ 0x7,
/*NEON_2RM_VCLE0*/ 0x7,
/*NEON_2RM_VCLT0*/ 0x7,
/*NEON_2RM_SHA1H*/ 0x4,
/*NEON_2RM_VABS*/ 0x7,
/*NEON_2RM_VNEG*/ 0x7,
/*NEON_2RM_VCGT0_F*/ 0x4,
/*NEON_2RM_VCGE0_F*/ 0x4,
/*NEON_2RM_VCEQ0_F*/ 0x4,
/*NEON_2RM_VCLE0_F*/ 0x4,
/*NEON_2RM_VCLT0_F*/ 0x4,
0,
/*NEON_2RM_VABS_F*/ 0x4,
/*NEON_2RM_VNEG_F*/ 0x4,
/*NEON_2RM_VSWP*/ 0x1,
/*NEON_2RM_VTRN*/ 0x7,
/*NEON_2RM_VUZP*/ 0x7,
/*NEON_2RM_VZIP*/ 0x7,
/*NEON_2RM_VMOVN*/ 0x7,
/*NEON_2RM_VQMOVN*/ 0x7,
/*NEON_2RM_VSHLL*/ 0x7,
/*NEON_2RM_SHA1SU1*/ 0x4,
/*NEON_2RM_VRINTN*/ 0x4,
/*NEON_2RM_VRINTX*/ 0x4,
/*NEON_2RM_VRINTA*/ 0x4,
/*NEON_2RM_VRINTZ*/ 0x4,
/*NEON_2RM_VCVT_F16_F32*/ 0x2,
/*NEON_2RM_VRINTM*/ 0x4,
/*NEON_2RM_VCVT_F32_F16*/ 0x2,
/*NEON_2RM_VRINTP*/ 0x4,
/*NEON_2RM_VCVTAU*/ 0x4,
/*NEON_2RM_VCVTAS*/ 0x4,
/*NEON_2RM_VCVTNU*/ 0x4,
/*NEON_2RM_VCVTNS*/ 0x4,
/*NEON_2RM_VCVTPU*/ 0x4,
/*NEON_2RM_VCVTPS*/ 0x4,
/*NEON_2RM_VCVTMU*/ 0x4,
/*NEON_2RM_VCVTMS*/ 0x4,
/*NEON_2RM_VRECPE*/ 0x4,
/*NEON_2RM_VRSQRTE*/ 0x4,
/*NEON_2RM_VRECPE_F*/ 0x4,
/*NEON_2RM_VRSQRTE_F*/ 0x4,
/*NEON_2RM_VCVT_FS*/ 0x4,
/*NEON_2RM_VCVT_FU*/ 0x4,
/*NEON_2RM_VCVT_SF*/ 0x4,
/*NEON_2RM_VCVT_UF*/ 0x4,
[NEON_2RM_VREV64] = 0x7,
[NEON_2RM_VREV32] = 0x3,
[NEON_2RM_VREV16] = 0x1,
[NEON_2RM_VPADDL] = 0x7,
[NEON_2RM_VPADDL_U] = 0x7,
[NEON_2RM_AESE] = 0x1,
[NEON_2RM_AESMC] = 0x1,
[NEON_2RM_VCLS] = 0x7,
[NEON_2RM_VCLZ] = 0x7,
[NEON_2RM_VCNT] = 0x1,
[NEON_2RM_VMVN] = 0x1,
[NEON_2RM_VPADAL] = 0x7,
[NEON_2RM_VPADAL_U] = 0x7,
[NEON_2RM_VQABS] = 0x7,
[NEON_2RM_VQNEG] = 0x7,
[NEON_2RM_VCGT0] = 0x7,
[NEON_2RM_VCGE0] = 0x7,
[NEON_2RM_VCEQ0] = 0x7,
[NEON_2RM_VCLE0] = 0x7,
[NEON_2RM_VCLT0] = 0x7,
[NEON_2RM_SHA1H] = 0x4,
[NEON_2RM_VABS] = 0x7,
[NEON_2RM_VNEG] = 0x7,
[NEON_2RM_VCGT0_F] = 0x4,
[NEON_2RM_VCGE0_F] = 0x4,
[NEON_2RM_VCEQ0_F] = 0x4,
[NEON_2RM_VCLE0_F] = 0x4,
[NEON_2RM_VCLT0_F] = 0x4,
[NEON_2RM_VABS_F] = 0x4,
[NEON_2RM_VNEG_F] = 0x4,
[NEON_2RM_VSWP] = 0x1,
[NEON_2RM_VTRN] = 0x7,
[NEON_2RM_VUZP] = 0x7,
[NEON_2RM_VZIP] = 0x7,
[NEON_2RM_VMOVN] = 0x7,
[NEON_2RM_VQMOVN] = 0x7,
[NEON_2RM_VSHLL] = 0x7,
[NEON_2RM_SHA1SU1] = 0x4,
[NEON_2RM_VRINTN] = 0x4,
[NEON_2RM_VRINTX] = 0x4,
[NEON_2RM_VRINTA] = 0x4,
[NEON_2RM_VRINTZ] = 0x4,
[NEON_2RM_VCVT_F16_F32] = 0x2,
[NEON_2RM_VRINTM] = 0x4,
[NEON_2RM_VCVT_F32_F16] = 0x2,
[NEON_2RM_VRINTP] = 0x4,
[NEON_2RM_VCVTAU] = 0x4,
[NEON_2RM_VCVTAS] = 0x4,
[NEON_2RM_VCVTNU] = 0x4,
[NEON_2RM_VCVTNS] = 0x4,
[NEON_2RM_VCVTPU] = 0x4,
[NEON_2RM_VCVTPS] = 0x4,
[NEON_2RM_VCVTMU] = 0x4,
[NEON_2RM_VCVTMS] = 0x4,
[NEON_2RM_VRECPE] = 0x4,
[NEON_2RM_VRSQRTE] = 0x4,
[NEON_2RM_VRECPE_F] = 0x4,
[NEON_2RM_VRSQRTE_F] = 0x4,
[NEON_2RM_VCVT_FS] = 0x4,
[NEON_2RM_VCVT_FU] = 0x4,
[NEON_2RM_VCVT_SF] = 0x4,
[NEON_2RM_VCVT_UF] = 0x4,
};
@ -6140,27 +6144,24 @@ static void gen_shl_ins_vec(TCGContext *s, unsigned vece, TCGv_vec d, TCGv_vec a
const GVecGen2i sli_op[4] = {
{ .fni8 = gen_shl8_ins_i64,
.fniv = gen_shl_ins_vec,
.opc = INDEX_op_shli_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
.opc = INDEX_op_shli_vec,
.vece = MO_8 },
{ .fni8 = gen_shl16_ins_i64,
.fniv = gen_shl_ins_vec,
.opc = INDEX_op_shli_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
.opc = INDEX_op_shli_vec,
.vece = MO_16 },
{ .fni4 = gen_shl32_ins_i32,
.fniv = gen_shl_ins_vec,
.opc = INDEX_op_shli_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
.opc = INDEX_op_shli_vec,
.vece = MO_32 },
{ .fni8 = gen_shl64_ins_i64,
.fniv = gen_shl_ins_vec,
.opc = INDEX_op_shli_vec,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.load_dest = true,
.opc = INDEX_op_shli_vec,
.vece = MO_64 },
};
@ -7436,6 +7437,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
for (pass = 0; pass <= q; ++pass) {
uint64_t val = 0;
int n;
for (n = 0; n < 8; n++) {
if (imm & (1 << (n + pass * 8))) {
val |= 0xffull << (n * 8);
@ -8068,7 +8070,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
TCGv_ptr fpst;
TCGv_i32 ahp;
if (!dc_isar_feature(aa32_fp16_spconv, s) ||
q || (rd & 1)) {
return 1;

View file

@ -1072,6 +1072,7 @@ int arm_rmode_to_sf(int rmode)
/* FIXME: add support for TIEAWAY and ODD */
qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
rmode);
/* fall through for now */
case FPROUNDING_TIEEVEN:
default:
rmode = float_round_nearest_even;