Mirror of https://github.com/yuzu-emu/unicorn.git
memory: Single byte swap along the I/O path
Now that MemOp has been pushed down into the memory API, and callers are encoding endianness, we can collapse byte swaps along the I/O path into the accelerator- and target-independent adjust_endianness.

Collapsing byte swaps along the I/O path enables additional endian inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with redundant byte swaps cancelling out.

Backports commit 9bf825bf3df4ebae3af51566c8088e3f1249a910 from qemu
commit 103d6f51c8
parent ad8957a4c3
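The rule the reworked adjust_endianness applies is simple: the access encodes its endianness in the MemOp, the device model declares its own, and a byte swap happens only when the two disagree, so redundant swap pairs cancel. Below is a minimal C sketch of that single-swap rule (illustration only, not code from this commit; the *_sketch names are hypothetical stand-ins for the MemOp/devend_memop logic visible in the qemu/memory.c hunk further down).

/*
 * Minimal sketch, assuming only that the access side and the device side
 * each carry an endianness tag; not part of the unicorn/qemu sources.
 */
#include <stdint.h>

typedef enum { ENDIAN_LITTLE_SKETCH, ENDIAN_BIG_SKETCH } endian_sketch_t;

/* Byte-reverse the low `size` bytes of `val`; size is 1, 2, 4 or 8. */
static uint64_t bswap_n_sketch(uint64_t val, unsigned size)
{
    uint64_t out = 0;
    for (unsigned i = 0; i < size; i++) {
        out = (out << 8) | (val & 0xff);
        val >>= 8;
    }
    return out;
}

/*
 * Single swap point, in the spirit of adjust_endianness() after this
 * change: swap at most once, and only when the endianness encoded for
 * the access disagrees with the endianness the device declares.
 */
static uint64_t adjust_endianness_sketch(uint64_t data, unsigned size,
                                         endian_sketch_t access_endian,
                                         endian_sketch_t device_endian)
{
    return access_endian != device_endian ? bswap_n_sketch(data, size) : data;
}

With the swap confined to this one point, extra endian-inversion features such as the SPARC64 Invert Endian TTE bit only need to flip the endianness encoded for the access, rather than inserting yet another swap along the path.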
qemu/accel/tcg/cputlb.c
@@ -834,38 +834,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }
 
-#ifdef TARGET_WORDS_BIGENDIAN
-#define NEED_BE_BSWAP 0
-#define NEED_LE_BSWAP 1
-#else
-#define NEED_BE_BSWAP 1
-#define NEED_LE_BSWAP 0
-#endif
-
-/*
- * Byte Swap Helper
- *
- * This should all dead code away depending on the build host and
- * access type.
- */
-
-static inline uint64_t handle_bswap(uint64_t val, MemOp op)
-{
-    if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
-        (!memop_big_endian(op) && NEED_LE_BSWAP)) {
-        switch (op & MO_SIZE) {
-        case MO_8: return val;
-        case MO_16: return bswap16(val);
-        case MO_32: return bswap32(val);
-        case MO_64: return bswap64(val);
-        default:
-            g_assert_not_reached();
-        }
-    } else {
-        return val;
-    }
-}
-
 /*
  * Load Helpers
  *
@@ -1045,10 +1013,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             }
         }
 
-        /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */
-        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
-                       retaddr, access_type, op);
-        return handle_bswap(res, op);
+        return io_readx(env, &env->iotlb[mmu_idx][index],
+                        mmu_idx, addr, retaddr, access_type, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1366,10 +1332,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             }
         }
 
-        /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */
         io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
-                  handle_bswap(val, op),
-                  addr, retaddr, op);
+                  val, addr, retaddr, op);
         return;
     }
 
qemu/exec.c
@@ -1957,16 +1957,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
             // Unicorn: commented out
             //release_lock |= prepare_mmio_access(mr);
             l = memory_access_size(mr, l, addr1);
-            /* XXX: could force current_cpu to NULL to avoid
-               potential bugs */
-            val = ldn_p(buf, l);
-            /*
-             * TODO: Merge bswap from ldn_p into memory_region_dispatch_write
-             * by using ldn_he_p and dropping MO_TE to get a host-endian value.
-             */
+            val = ldn_he_p(buf, l);
             result |= memory_region_dispatch_write(mr, addr1, val,
-                                                   size_memop(l) | MO_TE,
-                                                   attrs);
+                                                   size_memop(l), attrs);
         } else {
             /* RAM case */
             ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
@@ -2042,13 +2035,9 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
             // Unicorn: commented out
             //release_lock |= prepare_mmio_access(mr);
             l = memory_access_size(mr, l, addr1);
-            /*
-             * TODO: Merge bswap from stn_p into memory_region_dispatch_read
-             * by using stn_he_p and dropping MO_TE to get a host-endian value.
-             */
             result |= memory_region_dispatch_read(mr, addr1, &val,
-                                                  size_memop(l) | MO_TE, attrs);
-            stn_p(buf, l, val);
+                                                  size_memop(l), attrs);
+            stn_he_p(buf, l, val);
         } else {
             /* RAM case */
             ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
qemu/memory.c
@@ -413,32 +413,23 @@ static bool memory_region_big_endian(MemoryRegion *mr)
 #endif
 }
 
-static bool memory_region_wrong_endianness(MemoryRegion *mr)
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
 {
-#ifdef TARGET_WORDS_BIGENDIAN
-    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
-#else
-    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
-#endif
-}
-
-static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
-{
-    if (memory_region_wrong_endianness(mr)) {
-        switch (size) {
-        case 1:
+    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
+        switch (op & MO_SIZE) {
+        case MO_8:
             break;
-        case 2:
+        case MO_16:
             *data = bswap16(*data);
             break;
-        case 4:
+        case MO_32:
             *data = bswap32(*data);
             break;
-        case 8:
+        case MO_64:
             *data = bswap64(*data);
             break;
         default:
-            abort();
+            g_assert_not_reached();
         }
     }
 }
@@ -1319,7 +1310,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
     }
 
     r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
-    adjust_endianness(mr, pval, size);
+    adjust_endianness(mr, pval, op);
     return r;
 }
 
@@ -1336,7 +1327,7 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
         return MEMTX_DECODE_ERROR;
     }
 
-    adjust_endianness(mr, &data, size);
+    adjust_endianness(mr, &data, op);
 
     if (mr->ops->write) {
         return access_with_adjusted_size(addr, &data, size,
qemu/memory_ldst.inc.c
@@ -42,18 +42,8 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
         //release_lock |= prepare_mmio_access(mr);
 
         /* I/O case */
-        /* TODO: Merge bswap32 into memory_region_dispatch_read. */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_32 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap32(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap32(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
@@ -144,18 +134,8 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
         //release_lock |= prepare_mmio_access(mr);
 
        /* I/O case */
-        /* TODO: Merge bswap64 into memory_region_dispatch_read. */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_64 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap64(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap64(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
@@ -292,18 +272,8 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
         //release_lock |= prepare_mmio_access(mr);
 
        /* I/O case */
-        /* TODO: Merge bswap16 into memory_region_dispatch_read. */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_16 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap16(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap16(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
@@ -436,17 +406,6 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
     if (l < 4 || !memory_access_is_direct(mr, true)) {
         // Unicorn: commented out
         //release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap32(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap32(val);
-        }
-#endif
-        /* TODO: Merge bswap32 into memory_region_dispatch_write. */
         r = memory_region_dispatch_write(mr, addr1, val,
                                          MO_32 | devend_memop(endian), attrs);
     } else {
@@ -579,17 +538,6 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
     if (l < 2 || !memory_access_is_direct(mr, true)) {
         // Unicorn: commented out
         //release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap16(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap16(val);
-        }
-#endif
-        /* TODO: Merge bswap16 into memory_region_dispatch_write. */
         r = memory_region_dispatch_write(mr, addr1, val,
                                          MO_16 | devend_memop(endian), attrs);
     } else {
@@ -678,17 +626,6 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
     if (l < 8 || !memory_access_is_direct(mr, true)) {
         // Unicorn: commented out
         //release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap64(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap64(val);
-        }
-#endif
-        /* TODO: Merge bswap64 into memory_region_dispatch_write. */
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
     } else {