target/arm: Add arm_tlb_bti_gp

Introduce an lvalue macro to wrap target_tlb_bit0.

Backports commit 149d3b31f3f0f7f9e1c3a77043450a95c7a7e93d from qemu
Richard Henderson, 2021-02-25 17:44:11 -05:00 (committed by Lioncash)
parent a9eb62d211, commit 84012be55c
3 changed files with 15 additions and 2 deletions
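To illustrate why the macro must be an lvalue, here is a minimal, self-contained sketch; it is not part of the patch, and QEMU's real MemTxAttrs is reduced here to a one-bit stub. The macro expands to a plain struct member access, so it can sit on either side of an assignment, while typecheck_memtxattrs() turns a wrong pointer type into a compile-time error.

    /*
     * Standalone sketch, not part of the patch: MemTxAttrs is stubbed
     * down to the single bit the macro wraps.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct MemTxAttrs {
        unsigned int target_tlb_bit0 : 1;   /* bit cached in the TCG TLB */
    } MemTxAttrs;

    /* Validates the argument type; any non-MemTxAttrs* fails to compile. */
    static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
    {
        return x;
    }

    /* Expands to a member access, so it is usable as an lvalue. */
    #define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)

    int main(void)
    {
        MemTxAttrs attrs = { 0 };
        arm_tlb_bti_gp(&attrs) = true;   /* write side, as in get_phys_addr_lpae() */
        printf("guarded: %d\n", arm_tlb_bti_gp(&attrs));  /* read side, as in is_guarded_page() */
        return 0;
    }

A getter/setter pair would also work, but the lvalue form keeps both the write in get_phys_addr_lpae() and the read in is_guarded_page() one-line changes.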

--- a/qemu/target/arm/cpu.h
+++ b/qemu/target/arm/cpu.h
@@ -3251,6 +3251,19 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c. */
 extern const uint64_t pred_esz_masks[4];
 
+/* Helper for the macros below, validating the argument type. */
+static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
+{
+    return x;
+}
+
+/*
+ * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
+ * Using these should be a bit more self-documenting than using the
+ * generic target bits directly.
+ */
+#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
+
 /*
  * Naming convention for isar_feature functions:
  * Functions which test 32-bit ID registers should have _aa32_ in

--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -10850,7 +10850,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        txattrs->target_tlb_bit0 = true;
+        arm_tlb_bti_gp(txattrs) = true;
     }
 
     if (cacheattrs != NULL) {

--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -14746,7 +14746,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
      * table entry even for that case.
      */
     return (tlb_hit(entry->addr_code, addr) &&
-            env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
+            arm_tlb_bti_gp(&env->iotlb[mmu_idx][index].attrs));
 #endif
 }