/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
|
|
#include "cpu.h"
|
|
#include "internals.h"
|
|
#include "exec/exec-all.h"
|
|
#include "exec/cpu_ldst.h"
|
|
#include "exec/helper-proto.h"
|
|
|
|
|
|
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

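/*
 * Worked example for choose_nonexcluded_tag above (illustrative values,
 * not from the architecture pseudocode): with exclude = 0x0006 (tags 1
 * and 2 excluded) and tag = 1, offset = 0 walks 1 -> 2 -> 3 and returns
 * 3, while offset = 2 takes two non-excluded steps, 1 -> 3 -> 4, and
 * returns 4. When all sixteen tags are excluded, the function returns 0,
 * mirroring the Arm ChooseNonExcludedTag pseudocode.
 */
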
/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte. Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
    /* Tag storage not implemented. */
    return NULL;
}

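/*
 * Illustrative sketch of the nibble addressing (hypothetical addresses,
 * assuming TAG_GRANULE = 16 and LOG2_TAG_GRANULE = 4): the granules at
 * 0x1000 and 0x1010 share the tag byte at index 0x1000 >> 5 == 0x80,
 * with 0x1000 selecting the low nibble [3:0] and 0x1010 the high
 * nibble [7:4].
 */
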
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    int rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to behave as if
     * GCR_EL1.RRND==0, always producing deterministic results.
     */
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i;

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

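/*
 * A note on the loop above: the seed update is a 16-bit Fibonacci-style
 * LFSR -- the new top bit is the XOR of bits 5, 3, 2 and 0 of the seed,
 * shifted in as the rest of the seed shifts right. Four iterations
 * collect four pseudo-random bits into offset, which selects the new
 * tag relative to RGSR_EL1.TAG; both the chosen tag and the advanced
 * seed are written back to RGSR_EL1.
 */
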
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

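/*
 * This helper backs the ADDG/SUBG instructions: presumably the translator
 * has already scaled the address immediate by TAG_GRANULE (and negated it
 * for SUBG) into @offset, while @tag_offset counts how many non-excluded
 * tags to step past the pointer's current tag. For example (hypothetical
 * operands), ADDG X0, X1, #16, #2 would arrive here with offset = 16 and
 * tag_offset = 2.
 */
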
/* Read the tag nibble selected by bit LOG2_TAG_GRANULE of the address. */
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

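/*
 * Note on HELPER(ldg) above: @ptr is the tagged address to look up and
 * @xt the register value to update. address_with_allocation_tag deposits
 * the loaded tag (or 0 when the page has no tag storage) into the tag
 * bits of the result, leaving the rest of xt's address bits intact.
 */
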
static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = atomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = atomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

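/*
 * The cmpxchg loop above is needed because two adjacent tag granules
 * share one byte of tag storage: a parallel STG may race with a store to
 * the neighboring nibble, so the byte is re-read and the deposit retried
 * until the compare-and-swap observes no concurrent modification.
 */
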
typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

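/*
 * The _stub variants (here and st2g_stub below) appear to cover the case
 * where allocation tag access is not enabled: they perform the same
 * alignment check and write-permission probe as a real tag store, but
 * never touch tag memory.
 */
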
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            atomic_set(mem1, tag);
        }
    }
}

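/*
 * Worked example for do_st2g (illustrative, assuming TAG_GRANULE = 16):
 * for ptr = 0x1010 the two granules 0x1010 and 0x1020 straddle a tag-byte
 * boundary, landing in the high nibble of byte 0x80 and the low nibble of
 * byte 0x81, so two separate nibble stores are needed. For ptr = 0x1000
 * both granules map to byte 0x80, so replicating the tag into both
 * nibbles and issuing one atomic byte store suffices even in the
 * parallel case.
 */
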
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    /* -(ptr | TARGET_PAGE_MASK) is the number of bytes left in the page. */
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)

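/*
 * With GMID_EL1_BS == 6 (asserted below), LDGM_STGM_SIZE is 4 << 6 = 256
 * bytes, i.e. 16 tag granules. At two 4-bit tags per byte that is
 * 256 / (2 * TAG_GRANULE) = 8 bytes of tag storage, exactly the 64 bits
 * that LDGM/STGM move through the register.
 */
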
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * The tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE + 1,
     * i.e. that the block is at least 32 bytes (an unreasonably small
     * dcz anyway), to make sure that we can access one complete tag
     * byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

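/*
 * Worked example for stzgm_tags (hypothetical sizes): with a 64-byte
 * DC ZVA block, dcz_blocksize = 4 (it counts log2 words), so
 * log2_dcz_bytes = 6 and tag_bytes = 1 << (6 - 5) = 2. The block covers
 * four tag granules, and (val & 0xf) * 0x11 replicates the tag into both
 * nibbles, e.g. tag 0x3 becomes the byte 0x33, memset over 2 bytes.
 */
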
/*
 * Perform an MTE checked access for a single logical or atomic access.
 */
uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /* Tag checking is not yet implemented; pass the pointer through. */
    return ptr;
}

/*
 * Perform an MTE checked access for multiple logical accesses.
 */
uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /* Tag checking is not yet implemented; pass the pointer through. */
    return ptr;
}