target/arm: Fix multiline comment syntax

Since commit 8c06fbdf36b, checkpatch.pl enforces a new multiline
comment syntax. Since we'll be moving this code around, fix its
style first.

Backports commit 9a223097e44d5320f5e0546710263f22d11f12fc from qemu
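
For illustration, a minimal (hypothetical, not taken from the patch itself) example of the two styles:

/* Old style: the first line of text shares the opening delimiter,
 * which checkpatch.pl now rejects for multiline comments. */

/*
 * New style: the opening and closing delimiters stand alone,
 * and every line in between begins with a " * " prefix.
 */

Single-line /* comments */ are unaffected by the check.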
Philippe Mathieu-Daudé 2019-08-08 14:58:04 -04:00 committed by Lioncash
parent 0a5152caf8
commit 4f71266524
3 changed files with 419 additions and 211 deletions

File diff suppressed because it is too large


@@ -96,7 +96,8 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
 {
     uint32_t syn;
 
-    /* ISV is only set for data aborts routed to EL2 and
+    /*
+     * ISV is only set for data aborts routed to EL2 and
      * never for stage-1 page table walks faulting on stage 2.
      *
      * Furthermore, ISV is only set for certain kinds of load/stores.
@@ -111,7 +112,8 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
         syn = syn_data_abort_no_iss(same_el,
                                     ea, 0, s1ptw, is_write, fsc);
     } else {
-        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+        /*
+         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
          * syndrome created at translation time.
          * Now we create the runtime syndrome with the remaining fields.
          */
@@ -143,14 +145,16 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
     if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
         arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
-        /* LPAE format fault status register : bottom 6 bits are
+        /*
+         * LPAE format fault status register : bottom 6 bits are
          * status code in the same form as needed for syndrome
          */
         fsr = arm_fi_to_lfsc(fi);
         fsc = extract32(fsr, 0, 6);
     } else {
         fsr = arm_fi_to_sfsc(fi);
-        /* Short format FSR : this fault will never actually be reported
+        /*
+         * Short format FSR : this fault will never actually be reported
          * to an EL that uses a syndrome register. Use a (currently)
          * reserved FSR code in case the constructed syndrome does leak
          * into the guest somehow.
          */
@@ -193,7 +197,8 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 }
 
-/* arm_cpu_do_transaction_failed: handle a memory system error response
+/*
+ * arm_cpu_do_transaction_failed: handle a memory system error response
  * (eg "no device/memory present at address") by raising an external abort
  * exception
  */
@@ -369,7 +374,8 @@ void HELPER(setend)(CPUARMState *env)
     env->uncached_cpsr ^= CPSR_E;
 }
 
-/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
+/*
+ * Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
  * The function returns the target EL (1-3) if the instruction is to be trapped;
  * otherwise it returns 0 indicating it is not trapped.
  */
@@ -383,7 +389,8 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
         return 0;
     }
 
-    /* If we are currently in EL0 then we need to check if SCTLR is set up for
+    /*
+     * If we are currently in EL0 then we need to check if SCTLR is set up for
      * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
      */
     if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
@@ -402,7 +409,8 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
         }
     }
 
-    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
+    /*
+     * We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
      * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
      * bits will be zero indicating no trap.
      */
@@ -430,7 +438,8 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
     int target_el = check_wfx_trap(env, false);
 
     if (cpu_has_work(cs)) {
-        /* Don't bother to go into our "low power state" if
+        /*
+         * Don't bother to go into our "low power state" if
          * we would just wake up immediately.
          */
         return;
@@ -449,7 +458,8 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
 
 void HELPER(wfe)(CPUARMState *env)
 {
-    /* This is a hint instruction that is semantically different
+    /*
+     * This is a hint instruction that is semantically different
      * from YIELD even though we currently implement it identically.
      * Don't actually halt the CPU, just yield back to top
      * level loop. This is not going into a "low power state"
@@ -463,7 +473,8 @@ void HELPER(yield)(CPUARMState *env)
 {
     CPUState *cs = env_cpu(env);
 
-    /* This is a non-trappable hint instruction that generally indicates
+    /*
+     * This is a non-trappable hint instruction that generally indicates
      * that the guest is currently busy-looping. Yield control back to the
      * top level loop so that a more deserving VCPU has a chance to run.
      */
@@ -471,7 +482,8 @@ void HELPER(yield)(CPUARMState *env)
     cpu_loop_exit(cs);
 }
 
-/* Raise an internal-to-QEMU exception. This is limited to only
+/*
+ * Raise an internal-to-QEMU exception. This is limited to only
  * those EXCP values which are special cases for QEMU to interrupt
  * execution and not to be used for exceptions which are passed to
  * the guest (those must all have syndrome information and thus should
@@ -493,14 +505,16 @@ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
     raise_exception(env, excp, syndrome, target_el);
 }
 
-/* Raise an EXCP_BKPT with the specified syndrome register value,
+/*
+ * Raise an EXCP_BKPT with the specified syndrome register value,
  * targeting the correct exception level for debug exceptions.
  */
 void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
 {
     /* FSR will only be used if the debug target EL is AArch32. */
     env->exception.fsr = arm_debug_exception_fsr(env);
-    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+    /*
+     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
      * values to the guest that it shouldn't be able to see at its
      * exception/security level.
      */
@@ -525,7 +539,8 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 
     cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
 
-    /* Generated code has already stored the new PC value, but
+    /*
+     * Generated code has already stored the new PC value, but
      * without masking out its low bits, because which bits need
      * masking depends on whether we're returning to Thumb or ARM
      * state. Do the masking now.
@@ -579,7 +594,8 @@ void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
 {
     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
-        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
+        /*
+         * SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
          * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
          */
         raise_exception(env, EXCP_UDEF, syn_uncategorized(),
@@ -596,7 +612,8 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
 static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                       uint32_t regno)
 {
-    /* Raise an exception if the requested access is one of the UNPREDICTABLE
+    /*
+     * Raise an exception if the requested access is one of the UNPREDICTABLE
      * cases; otherwise return. This broadly corresponds to the pseudocode
      * BankedRegisterAccessValid() and SPSRAccessValid(),
      * except that we have already handled some cases at translate time.
@@ -743,7 +760,8 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
         target_el = exception_target_el(env);
         break;
     case CP_ACCESS_TRAP_EL2:
-        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
+        /*
+         * Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
          * a bug in the access function.
          */
         assert(!arm_is_secure(env) && arm_current_el(env) != 3);
@@ -766,7 +784,8 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
         break;
     case CP_ACCESS_TRAP_FP_EL2:
         target_el = 2;
-        /* Since we are an implementation that takes exceptions on a trapped
+        /*
+         * Since we are an implementation that takes exceptions on a trapped
          * conditional insn only if the insn has passed its condition code
          * check, we take the IMPDEF choice to always report CV=1 COND=0xe
          * (which is also the required value for AArch64 traps).
@@ -821,7 +840,8 @@ void HELPER(pre_hvc)(CPUARMState *env)
     bool undef;
 
     if (arm_is_psci_call(cpu, EXCP_HVC)) {
-        /* If PSCI is enabled and this looks like a valid PSCI call then
+        /*
+         * If PSCI is enabled and this looks like a valid PSCI call then
          * that overrides the architecturally mandated HVC behaviour.
          */
         return;
@@ -837,7 +857,8 @@ void HELPER(pre_hvc)(CPUARMState *env)
         undef = env->cp15.hcr_el2 & HCR_HCD;
     }
 
-    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+    /*
+     * In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
      * For ARMv8/AArch64, HVC is allowed in EL3.
      * Note that we've already trapped HVC from EL0 at translation
      * time.
@@ -889,7 +910,8 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
      * Conduit not SMC          Undef insn    Undef insn
      */
 
-    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+    /*
+     * On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
      * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
      * extensions, SMD only applies to NS state.
      * On ARMv7 without the Virtualization extensions, the SMD bit
@@ -901,7 +923,8 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 
     if (!arm_feature(env, ARM_FEATURE_EL3) &&
         cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
-        /* If we have no EL3 then SMC always UNDEFs and can't be
+        /*
+         * If we have no EL3 then SMC always UNDEFs and can't be
          * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
          * firmware within QEMU, and we want an EL2 guest to be able
          * to forbid its EL1 from making PSCI calls into QEMU's
@@ -914,7 +937,8 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
     }
 
     if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
-        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
+        /*
+         * In NS EL1, HCR controlled routing to EL2 has priority over SMD.
          * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
@@ -922,7 +946,8 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
         raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
     }
 
-    /* Catch the two remaining "Undef insn" cases of the previous table:
+    /*
+     * Catch the two remaining "Undef insn" cases of the previous table:
      * - PSCI conduit is SMC but we don't have a valid PCSI call,
      * - We don't have EL3 or SMD is set.
      */
@@ -943,7 +968,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     int bt;
     uint32_t contextidr;
 
-    /* Links to unimplemented or non-context aware breakpoints are
+    /*
+     * Links to unimplemented or non-context aware breakpoints are
      * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
      * as if linked to an UNKNOWN context-aware breakpoint (in which
      * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
@@ -962,7 +988,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
 
     bt = extract64(bcr, 20, 4);
 
-    /* We match the whole register even if this is AArch32 using the
+    /*
+     * We match the whole register even if this is AArch32 using the
      * short descriptor format (in which case it holds both PROCID and ASID),
      * since we don't implement the optional v7 context ID masking.
      */
@@ -979,7 +1006,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     case 9: /* linked VMID match (reserved if no EL2) */
     case 11: /* linked context ID and VMID match (reserved if no EL2) */
     default:
-        /* Links to Unlinked context breakpoints must generate no
+        /*
+         * Links to Unlinked context breakpoints must generate no
          * events; we choose to do the same for reserved values too.
          */
         return false;
@@ -993,7 +1021,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
     CPUARMState *env = &cpu->env;
     uint64_t cr;
     int pac, hmc, ssc, wt, lbn;
-    /* Note that for watchpoints the check is against the CPU security
+    /*
+     * Note that for watchpoints the check is against the CPU security
      * state, not the S/NS attribute on the offending data access.
      */
     bool is_secure = arm_is_secure(env);
@@ -1022,7 +1051,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
         }
         cr = env->cp15.dbgbcr[n];
     }
-    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+    /*
+     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
      * enabled and that the address and access type match; for breakpoints
      * we know the address matched; check the remaining fields, including
      * linked breakpoints. We rely on WCR and BCR having the same layout
@@ -1090,7 +1120,8 @@ static bool check_watchpoints(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     int n;
 
-    /* If watchpoints are disabled globally or we can't take debug
+    /*
+     * If watchpoints are disabled globally or we can't take debug
      * exceptions here then watchpoint firings are ignored.
      */
     if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1111,7 +1142,8 @@ static bool check_breakpoints(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     int n;
 
-    /* If breakpoints are disabled globally or we can't take debug
+    /*
+     * If breakpoints are disabled globally or we can't take debug
      * exceptions here then breakpoint firings are ignored.
      */
     if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1138,7 +1170,8 @@ void HELPER(check_breakpoints)(CPUARMState *env)
 
 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
 {
-    /* Called by core code when a CPU watchpoint fires; need to check if this
+    /*
+     * Called by core code when a CPU watchpoint fires; need to check if this
      * is also an architectural watchpoint match.
      */
     ARMCPU *cpu = ARM_CPU(cs->uc, cs);
@@ -1151,7 +1184,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
     ARMCPU *cpu = ARM_CPU(cs->uc, cs);
     CPUARMState *env = &cpu->env;
 
-    /* In BE32 system mode, target memory is stored byteswapped (on a
+    /*
+     * In BE32 system mode, target memory is stored byteswapped (on a
      * little-endian host system), and by the time we reach here (via an
      * opcode helper) the addresses of subword accesses have been adjusted
      * to account for that, which means that watchpoints will not match.
@@ -1170,7 +1204,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
 
 void arm_debug_excp_handler(CPUState *cs)
 {
-    /* Called by core code when a watchpoint or breakpoint fires;
+    /*
+     * Called by core code when a watchpoint or breakpoint fires;
      * need to check which one and raise the appropriate exception.
      */
     ARMCPU *cpu = ARM_CPU(cs->uc, cs);
@@ -1194,7 +1229,8 @@ void arm_debug_excp_handler(CPUState *cs)
         uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
         bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
 
-        /* (1) GDB breakpoints should be handled first.
+        /*
+         * (1) GDB breakpoints should be handled first.
          * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
          * since singlestep is also done by generating a debug internal
         * exception.
@@ -1205,7 +1241,8 @@ void arm_debug_excp_handler(CPUState *cs)
         }
 
         env->exception.fsr = arm_debug_exception_fsr(env);
-        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+        /*
+         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
          * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
@@ -1216,9 +1253,11 @@ void arm_debug_excp_handler(CPUState *cs)
     }
 }
 
-/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
-   The only way to do that in TCG is a conditional branch, which clobbers
-   all our temporaries. For now implement these as helper functions. */
+/*
+ * ??? Flag setting arithmetic is awkward because we need to do comparisons.
+ * The only way to do that in TCG is a conditional branch, which clobbers
+ * all our temporaries. For now implement these as helper functions.
+ */
 
 /* Similarly for variable shift instructions. */
 


@@ -25,9 +25,11 @@
 
 #include "internals.h"
 
-/* VFP support. We follow the convention used for VFP instructions:
-   Single precision routines have a "s" suffix, double precision a
-   "d" suffix. */
+/*
+ * VFP support. We follow the convention used for VFP instructions:
+ * Single precision routines have a "s" suffix, double precision a
+ * "d" suffix.
+ */
 
 /* Convert host exception flags to vfp form. */
 static inline int vfp_exceptbits_from_host(int host_bits)
@@ -170,7 +172,8 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
         set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
     }
 
-    /* The exception flags are ORed together when we read fpscr so we
+    /*
+     * The exception flags are ORed together when we read fpscr so we
      * only need to preserve the current state in one of our
      * float_status values.
     */
@@ -451,7 +454,8 @@ uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
                                     shift, fpst);
 }
 
-/* Set the current fp rounding mode and return the old one.
+/*
+ * Set the current fp rounding mode and return the old one.
  * The argument is a softfloat float_round_ value.
  */
 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
@@ -464,7 +468,8 @@ uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
     return prev_rmode;
 }
 
-/* Set the current fp rounding mode in the standard fp status and return
+/*
+ * Set the current fp rounding mode in the standard fp status and return
  * the old one. This is for NEON instructions that need to change the
  * rounding mode but wish to use the standard FPSCR values for everything
  * else. Always set the rounding mode back to the correct value after
@@ -484,7 +489,8 @@ uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
 /* Half precision conversions. */
 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 {
-    /* Squash FZ16 to 0 for the duration of conversion. In this case,
+    /*
+     * Squash FZ16 to 0 for the duration of conversion. In this case,
      * it would affect flushing input denormals.
      */
     float_status *fpst = fpstp;
@@ -497,7 +503,8 @@ float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 
 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
 {
-    /* Squash FZ16 to 0 for the duration of conversion. In this case,
+    /*
+     * Squash FZ16 to 0 for the duration of conversion. In this case,
      * it would affect flushing output denormals.
      */
     float_status *fpst = fpstp;
@@ -510,7 +517,8 @@ uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
 
 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 {
-    /* Squash FZ16 to 0 for the duration of conversion. In this case,
+    /*
+     * Squash FZ16 to 0 for the duration of conversion. In this case,
      * it would affect flushing input denormals.
      */
     float_status *fpst = fpstp;
@@ -523,7 +531,8 @@ float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
 
 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
 {
-    /* Squash FZ16 to 0 for the duration of conversion. In this case,
+    /*
+     * Squash FZ16 to 0 for the duration of conversion. In this case,
      * it would affect flushing output denormals.
      */
     float_status *fpst = fpstp;
@@ -568,21 +577,25 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
 /* NEON helpers. */
 
-/* Constants 256 and 512 are used in some helpers; we avoid relying on
- * int->float conversions at run-time. */
+/*
+ * Constants 256 and 512 are used in some helpers; we avoid relying on
+ * int->float conversions at run-time.
+ */
 
 #define float64_256 make_float64(0x4070000000000000LL)
 #define float64_512 make_float64(0x4080000000000000LL)
 #define float16_maxnorm make_float16(0x7bff)
 #define float32_maxnorm make_float32(0x7f7fffff)
 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
 
-/* Reciprocal functions
+/*
+ * Reciprocal functions
  *
  * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */
 
-/* See RecipEstimate()
+/*
+ * See RecipEstimate()
  *
  * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
@@ -805,7 +818,8 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
     return make_float64(f64_val);
 }
 
-/* The algorithm that must be used to calculate the estimate
+/*
+ * The algorithm that must be used to calculate the estimate
  * is specified by the ARM ARM.
  */
 
@@ -887,8 +901,10 @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
         return float16_zero;
     }
 
-    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
-     * preserving the parity of the exponent. */
+    /*
+     * Scale and normalize to a double-precision value between 0.25 and 1.0,
+     * preserving the parity of the exponent.
+     */
 
     f64_frac = ((uint64_t) f16_frac) << (52 - 10);
 
@@ -931,8 +947,10 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
         return float32_zero;
     }
 
-    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
-     * preserving the parity of the exponent. */
+    /*
+     * Scale and normalize to a double-precision value between 0.25 and 1.0,
+     * preserving the parity of the exponent.
+     */
 
     f64_frac = ((uint64_t) f32_frac) << 29;
 