cputlb: Replace size and endian operands for MemOp
Preparation for collapsing the two byte swaps adjust_endianness and handle_bswap into the former. Backports commit be5c4787e9a6eed12fd765d9e890f7cc6cd63220 from qemu
commit ad8957a4c3
parent da98d0da4e
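The diff below threads a single MemOp operand through io_readx()/io_writex() and the load/store helpers, where callers previously passed an int size (building size_memop(size) | MO_TE at each dispatch site) plus a bool big_endian. As a rough, self-contained sketch of that packing (the numeric MO_* values are illustrative assumptions rather than QEMU's definitions, and in QEMU MO_BE/MO_LE depend on host byte order), the size and endianness of one access round-trip like this:

/* Illustrative model only, not QEMU's definitions. */
#include <assert.h>
#include <stdbool.h>

typedef int MemOp;

enum {
    MO_SIZE  = 3,            /* low bits: log2 of the access size in bytes */
    MO_BSWAP = 8,            /* swap relative to host order                */
    MO_LE    = 0,            /* assumes a little-endian host               */
    MO_BE    = MO_BSWAP,
};

static MemOp size_memop(unsigned size)      /* 1,2,4,8 -> 0,1,2,3 (ctz) */
{
    return __builtin_ctz(size);
}

static unsigned memop_size(MemOp op)        /* assumed inverse, as used by the patch */
{
    return 1u << (op & MO_SIZE);
}

static bool memop_big_endian(MemOp op)      /* the helper added in memop.h below */
{
    return (op & MO_BSWAP) == MO_BE;
}

int main(void)
{
    MemOp op = size_memop(4) | MO_BE;       /* one operand: 32-bit, big-endian */
    assert(memop_size(op) == 4);
    assert(memop_big_endian(op));
    return 0;
}

memop_size() is assumed to be the inverse of the size_memop() visible in the memop.h hunk; memop_big_endian() mirrors the helper that hunk adds.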
qemu/accel/tcg/cputlb.c
@@ -585,7 +585,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
-                         MMUAccessType access_type, int size)
+                         MMUAccessType access_type, MemOp op)
 {
     CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
@@ -605,15 +605,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     cpu->mem_io_vaddr = addr;
     cpu->mem_io_access_type = access_type;
 
-    r = memory_region_dispatch_read(mr, mr_offset, &val,
-                                    size_memop(size) | MO_TE,
-                                    iotlbentry->attrs);
+    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
-        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
+        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
     return val;
@@ -621,7 +619,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 
 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                       int mmu_idx, uint64_t val, target_ulong addr,
-                      uintptr_t retaddr, int size)
+                      uintptr_t retaddr, MemOp op)
 {
     CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
@@ -637,16 +635,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     }
     cpu->mem_io_vaddr = addr;
     cpu->mem_io_pc = retaddr;
-    r = memory_region_dispatch_write(mr, mr_offset, val,
-                                     size_memop(size) | MO_TE,
-                                     iotlbentry->attrs);
+    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
-        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
-                               mmu_idx, iotlbentry->attrs, r, retaddr);
+        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
+                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+                               retaddr);
     }
 }
 
@@ -852,14 +849,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
  * access type.
  */
 
-static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
+static inline uint64_t handle_bswap(uint64_t val, MemOp op)
 {
-    if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
-        switch (size) {
-        case 1: return val;
-        case 2: return bswap16(val);
-        case 4: return bswap32(val);
-        case 8: return bswap64(val);
+    if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
+        (!memop_big_endian(op) && NEED_LE_BSWAP)) {
+        switch (op & MO_SIZE) {
+        case MO_8: return val;
+        case MO_16: return bswap16(val);
+        case MO_32: return bswap32(val);
+        case MO_64: return bswap64(val);
         default:
             g_assert_not_reached();
         }
@@ -882,8 +880,7 @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
 
 static inline uint64_t __attribute__((always_inline))
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
-            uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
-            bool is_softmmu_access,
+            uintptr_t retaddr, MemOp op, bool code_read, bool is_softmmu_access,
             FullLoadHelper *full_load)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
@@ -897,6 +894,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     void *haddr;
     uint64_t res;
+    size_t size = memop_size(op);
     int error_code;
     struct hook *hook;
     bool handled;
@@ -1049,8 +1047,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 
         /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */
-        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
-                       retaddr, access_type, size);
-        return handle_bswap(res, size, big_endian);
+        res = io_readx(env, &env->iotlb[mmu_idx][index],
+                       mmu_idx, addr, retaddr, access_type, op);
+        return handle_bswap(res, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1067,7 +1065,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         r2 = full_load(env, addr2, oi, retaddr);
         shift = (addr & (size - 1)) * 8;
 
-        if (big_endian) {
+        if (memop_big_endian(op)) {
             /* Big-endian combine. */
             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
         } else {
@@ -1080,30 +1078,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 
 do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (size) {
-    case 1:
+    switch (op) {
+    case MO_UB:
         res = ldub_p(haddr);
         break;
-    case 2:
-        if (big_endian) {
-            res = lduw_be_p(haddr);
-        } else {
-            res = lduw_le_p(haddr);
-        }
+    case MO_BEUW:
+        res = lduw_be_p(haddr);
         break;
-    case 4:
-        if (big_endian) {
-            res = (uint32_t)ldl_be_p(haddr);
-        } else {
-            res = (uint32_t)ldl_le_p(haddr);
-        }
+    case MO_LEUW:
+        res = lduw_le_p(haddr);
         break;
-    case 8:
-        if (big_endian) {
-            res = ldq_be_p(haddr);
-        } else {
-            res = ldq_le_p(haddr);
-        }
+    case MO_BEUL:
+        res = (uint32_t)ldl_be_p(haddr);
+        break;
+    case MO_LEUL:
+        res = (uint32_t)ldl_le_p(haddr);
+        break;
+    case MO_BEQ:
+        res = ldq_be_p(haddr);
+        break;
+    case MO_LEQ:
+        res = ldq_le_p(haddr);
         break;
     default:
         g_assert_not_reached();
@@ -1136,7 +1131,7 @@ finished:
 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 1, false, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_UB, false, false,
                        full_ldub_mmu);
 }
 
@@ -1149,7 +1144,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 2, false, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_LEUW, false, false,
                        full_le_lduw_mmu);
 }
 
@@ -1162,7 +1157,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 2, true, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_BEUW, false, false,
                        full_be_lduw_mmu);
 }
 
@@ -1175,7 +1170,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 4, false, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_LEUL, false, false,
                        full_le_ldul_mmu);
 }
 
@@ -1188,7 +1183,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 4, true, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_BEUL, false, false,
                        full_be_ldul_mmu);
 }
 
@@ -1201,14 +1196,14 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 8, false, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_LEQ, false, false,
                        helper_le_ldq_mmu);
 }
 
 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 8, true, false, false,
+    return load_helper(env, addr, oi, retaddr, MO_BEQ, false, false,
                        helper_be_ldq_mmu);
 }
 
@@ -1254,7 +1249,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
 
 static inline void __attribute__((always_inline))
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
-             TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
+             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1263,6 +1258,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     void *haddr;
+    size_t size = memop_size(op);
     struct hook *hook;
     bool handled;
     HOOK_FOREACH_VAR_DECLARE;
@@ -1372,8 +1368,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
         /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */
         io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
-                  handle_bswap(val, size, big_endian),
-                  addr, retaddr, size);
+                  handle_bswap(val, op),
+                  addr, retaddr, op);
         return;
     }
 
@@ -1409,7 +1405,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
          */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
-           if (big_endian) {
+           if (memop_big_endian(op)) {
               /* Big-endian extract. */
               val8 = val >> (((size - 1) * 8) - (i * 8));
           } else {
@@ -1423,30 +1419,27 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
 do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (size) {
-    case 1:
+    switch (op) {
+    case MO_UB:
         stb_p(haddr, val);
         break;
-    case 2:
-        if (big_endian) {
-            stw_be_p(haddr, val);
-        } else {
-            stw_le_p(haddr, val);
-        }
+    case MO_BEUW:
+        stw_be_p(haddr, val);
         break;
-    case 4:
-        if (big_endian) {
-            stl_be_p(haddr, val);
-        } else {
-            stl_le_p(haddr, val);
-        }
+    case MO_LEUW:
+        stw_le_p(haddr, val);
         break;
-    case 8:
-        if (big_endian) {
-            stq_be_p(haddr, val);
-        } else {
-            stq_le_p(haddr, val);
-        }
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
         break;
     default:
         g_assert_not_reached();
@@ -1457,43 +1450,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                         TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 1, false);
+    store_helper(env, addr, val, oi, retaddr, MO_UB);
 }
 
 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 2, false);
+    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
 }
 
 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 2, true);
+    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
 }
 
 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 4, false);
+    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
 }
 
 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 4, true);
+    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
 }
 
 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 8, false);
+    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
 }
 
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, 8, true);
+    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
 }
 
 /* First set of helpers allows passing in of OI and RETADDR. This makes
@@ -1552,8 +1545,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 1, false, true, true,
-                       full_ldub_cmmu);
+    return load_helper(env, addr, oi, retaddr, MO_8, true, true, full_ldub_cmmu);
 }
 
 uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
@@ -1565,7 +1557,7 @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 2, false, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_LEUW, true, true,
                        full_le_lduw_cmmu);
 }
 
@@ -1578,7 +1570,7 @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 2, true, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_BEUW, true, true,
                        full_be_lduw_cmmu);
 }
 
@@ -1591,7 +1583,7 @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 4, false, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_LEUL, true, true,
                        full_le_ldul_cmmu);
 }
 
@@ -1604,7 +1596,7 @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 4, true, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_BEUL, true, true,
                        full_be_ldul_cmmu);
 }
 
@@ -1617,13 +1609,13 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 8, false, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_LEQ, true, true,
                        helper_le_ldq_cmmu);
 }
 
 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, 8, true, true, true,
+    return load_helper(env, addr, oi, retaddr, MO_BEQ, true, true,
                        helper_be_ldq_cmmu);
 }
qemu/include/exec/memop.h
@@ -125,4 +125,10 @@ static inline MemOp size_memop(unsigned size)
     return ctz32(size);
 }
 
+/* Big endianness from MemOp. */
+static inline bool memop_big_endian(MemOp op)
+{
+    return (op & MO_BSWAP) == MO_BE;
+}
+
 #endif
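A closing note on the aligned fast path: the old switch (size) plus if (big_endian) pairs collapse into a single switch (op) because composite MemOp labels such as MO_BEUW are simply the endian flag OR'd with the size. A minimal sketch under assumed constant values (not QEMU's headers; load_aligned() and its buffer are hypothetical):

#include <assert.h>
#include <stdint.h>

typedef int MemOp;

/* Illustrative values only. */
enum {
    MO_8 = 0, MO_16 = 1,
    MO_BSWAP = 8, MO_LE = 0, MO_BE = MO_BSWAP,
    MO_UB   = MO_8,
    MO_BEUW = MO_BE | MO_16,      /* big-endian 16-bit    */
    MO_LEUW = MO_LE | MO_16,      /* little-endian 16-bit */
};

/* One switch on the combined operand replaces a size case that then
 * branched on big_endian, mirroring the new load_helper() structure. */
static uint64_t load_aligned(const uint8_t *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return haddr[0];
    case MO_BEUW:
        return (uint16_t)((haddr[0] << 8) | haddr[1]);
    case MO_LEUW:
        return (uint16_t)(haddr[0] | (haddr[1] << 8));
    default:
        assert(0);
        return 0;
    }
}

int main(void)
{
    const uint8_t buf[2] = { 0x12, 0x34 };    /* bytes in memory order */
    assert(load_aligned(buf, MO_BEUW) == 0x1234);
    assert(load_aligned(buf, MO_LEUW) == 0x3412);
    return 0;
}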