diff --git a/qemu/target/arm/cpu.h b/qemu/target/arm/cpu.h
index a4090447..dfb824c9 100644
--- a/qemu/target/arm/cpu.h
+++ b/qemu/target/arm/cpu.h
@@ -3251,6 +3251,19 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c. */
 extern const uint64_t pred_esz_masks[4];
 
+/* Helper for the macros below, validating the argument type. */
+static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
+{
+    return x;
+}
+
+/*
+ * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
+ * Using these should be a bit more self-documenting than using the
+ * generic target bits directly.
+ */
+#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
+
 /*
  * Naming convention for isar_feature functions:
  * Functions which test 32-bit ID registers should have _aa32_ in
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index a99e84d2..79042ecf 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -10850,7 +10850,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        txattrs->target_tlb_bit0 = true;
+        arm_tlb_bti_gp(txattrs) = true;
     }
 
     if (cacheattrs != NULL) {
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index e92bd13b..4781dc12 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -14746,7 +14746,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
      * table entry even for that case.
      */
     return (tlb_hit(entry->addr_code, addr) &&
-            env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
+            arm_tlb_bti_gp(&env->iotlb[mmu_idx][index].attrs));
 #endif
 }
 
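Note (not part of the patch): the point of routing the macro through
typecheck_memtxattrs() is that passing anything other than a MemTxAttrs *
becomes a compile-time error, while the macro still expands to a plain
struct member access and therefore stays usable as an lvalue, as both call
sites above rely on. A minimal standalone sketch of the same idiom, using
a hypothetical attrs_t type and flag name in place of QEMU's MemTxAttrs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for MemTxAttrs; only the idiom matters. */
typedef struct {
    bool target_bit0;
} attrs_t;

/*
 * Validate the argument type: any argument that is not attrs_t *
 * fails to compile, yet the inline call compiles away entirely.
 */
static inline attrs_t *typecheck_attrs(attrs_t *x)
{
    return x;
}

/* Expands to an ordinary member access, so it works as an lvalue too. */
#define attrs_bit0(x) (typecheck_attrs(x)->target_bit0)

int main(void)
{
    attrs_t a = { false };
    attrs_bit0(&a) = true;               /* write through the macro */
    printf("%d\n", attrs_bit0(&a));      /* read through the macro  */
    return 0;
}

Handing attrs_bit0() an int *, for example, is rejected at compile time,
which is exactly the safety the patch buys over touching target_tlb_bit0
directly.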