Make CPU iotlb a structure rather than a plain hwaddr
Make the CPU iotlb a structure rather than a plain hwaddr; this will allow us to add transaction attributes to it.

Backports commit e469b22ffda40188954fafaf6e3308f58d50f8f8 from qemu
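In short: each IOTLB slot goes from a bare hwaddr to a one-field struct, so later changes can add fields without touching every array declaration and helper signature again. A minimal before/after sketch, assuming qemu's usual hwaddr/NB_MMU_MODES/CPU_TLB_SIZE definitions are in scope; the attrs field is shown only as a hypothetical illustration of the "transaction attributes" the message anticipates, not as part of this commit:

/* Before: the IOTLB is an array of plain physical addresses. */
hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE];

/* After: each entry is a struct wrapping the address. */
typedef struct CPUIOTLBEntry {
    hwaddr addr;
    /* hypothetical future extension, per the commit message,
     * e.g.:  MemTxAttrs attrs;  (added by a later qemu change,
     * not by this commit) */
} CPUIOTLBEntry;

CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];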
commit 2aecce835b
parent 825e74410f
--- a/qemu/cputlb.c
+++ b/qemu/cputlb.c
@@ -249,7 +249,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
 
     /* refill the tlb */
-    env->iotlb[mmu_idx][index] = iotlb - vaddr;
+    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
     te->addend = (uintptr_t)(addend - vaddr);
     if (prot & PAGE_READ) {
         te->addr_read = address;
@@ -302,7 +302,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
             return -1;
         }
     }
-    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
+    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
     mr = iotlb_to_region(cpu, pd);
     if (memory_region_is_unassigned(cpu->uc, mr)) {
         CPUClass *cc = CPU_GET_CLASS(env1->uc, cpu);
--- a/qemu/include/exec/cpu-defs.h
+++ b/qemu/include/exec/cpu-defs.h
@@ -121,12 +121,21 @@ typedef struct CPUTLBEntry {
 
 QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
 
+/* The IOTLB is not accessed directly inline by generated TCG code,
+ * so the CPUIOTLBEntry layout is not as critical as that of the
+ * CPUTLBEntry. (This is also why we don't want to combine the two
+ * structs into one.)
+ */
+typedef struct CPUIOTLBEntry {
+    hwaddr addr;
+} CPUIOTLBEntry;
+
 #define CPU_COMMON_TLB \
     /* The meaning of the MMU modes is defined in the target code. */  \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
-    hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                           \
-    hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                        \
+    CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
+    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
     target_ulong tlb_flush_addr;                                        \
     target_ulong tlb_flush_mask;                                        \
     target_ulong vtlb_index;                                            \
--- a/qemu/softmmu_template.h
+++ b/qemu/softmmu_template.h
@@ -125,7 +125,7 @@
      * victim tlb. try to refill from the victim tlb before walking the \
      * page table. */                                                   \
     int vidx;                                                           \
-    hwaddr tmpiotlb;                                                    \
+    CPUIOTLBEntry tmpiotlb;                                             \
     CPUTLBEntry tmptlb;                                                 \
     for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                   \
         if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
@@ -157,12 +157,13 @@ static inline bool victim_tlb_hit_write(CPUArchState *env, target_ulong addr, in
 
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
-                                              hwaddr physaddr,
+                                              CPUIOTLBEntry *iotlbentry,
                                               target_ulong addr,
                                               uintptr_t retaddr)
 {
     uint64_t val;
     CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
@@ -316,12 +317,12 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
+        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
+        iotlbentry = &env->iotlb[mmu_idx][index];
+        if (iotlbentry->addr == 0) {
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_READ_UNMAPPED;
             // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr);
@@ -333,7 +334,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
         res = TGT_LE(res);
         goto _out;
     }
@@ -543,13 +544,13 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
+        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
+        iotlbentry = &env->iotlb[mmu_idx][index];
 
-        if (ioaddr == 0) {
+        if (iotlbentry->addr == 0) {
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_READ_UNMAPPED;
             // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr);
@@ -559,7 +560,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
         res = TGT_BE(res);
         goto _out;
     }
@@ -658,12 +659,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
 #endif
 
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
-                                          hwaddr physaddr,
+                                          CPUIOTLBEntry *iotlbentry,
                                           DATA_TYPE val,
                                           target_ulong addr,
                                           uintptr_t retaddr)
 {
     CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
@@ -766,12 +768,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
+        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
+        iotlbentry = &env->iotlb[mmu_idx][index];
+        if (iotlbentry->addr == 0) {
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_WRITE_UNMAPPED;
             // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
@@ -782,7 +784,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_LE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
         return;
     }
 
@@ -924,12 +926,12 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
+        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
+        iotlbentry = &env->iotlb[mmu_idx][index];
+        if (iotlbentry->addr == 0) {
             env->invalid_addr = addr;
             env->invalid_error = UC_ERR_WRITE_UNMAPPED;
             // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
@@ -940,7 +942,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_BE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
         return;
     }
 