cputlb: Add functions for flushing TLB for a single MMU index

Guest CPU TLB maintenance operations may be sufficiently
specialized to only need to flush TLB entries corresponding
to a particular MMU index. Implement cputlb functions for
this, to avoid the inefficiency of flushing TLB entries
which we don't need to.

Backports commit d7a74a9d4a68e27b3a8ceda17bb95cb0a23d8e4d from qemu
Peter Maydell authored 2018-02-15 09:34:07 -05:00, committed by Lioncash
parent 86af3f249d
commit 6e94bda144
16 changed files with 188 additions and 16 deletions


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_aarch64
#define ti925t_initfn ti925t_initfn_aarch64
#define tlb_add_large_page tlb_add_large_page_aarch64
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_aarch64
#define tlb_flush_entry tlb_flush_entry_aarch64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_aarch64eb
#define ti925t_initfn ti925t_initfn_aarch64eb
#define tlb_add_large_page tlb_add_large_page_aarch64eb
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_aarch64eb
#define tlb_flush_entry tlb_flush_entry_aarch64eb
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64eb
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64eb
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64eb
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64eb


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_arm
#define ti925t_initfn ti925t_initfn_arm
#define tlb_add_large_page tlb_add_large_page_arm
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_arm
#define tlb_flush_entry tlb_flush_entry_arm
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_arm
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_arm
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_arm
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_arm


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_armeb
#define ti925t_initfn ti925t_initfn_armeb
#define tlb_add_large_page tlb_add_large_page_armeb
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_armeb
#define tlb_flush_entry tlb_flush_entry_armeb
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_armeb
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_armeb
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_armeb
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_armeb


@@ -120,21 +120,6 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tb_flush_jmp_cache(cpu, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr)
{
cpu_physical_memory_test_and_clear_dirty(uc, ram_addr, TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
void tlb_unprotect_code(CPUState *cpu, ram_addr_t ram_addr)
{
cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE);
}
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
uintptr_t length)
{
@@ -382,8 +367,48 @@ static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
CPUArchState *env = cpu->env_ptr;
#if defined(DEBUG_TLB)
printf("tlb_flush_by_mmuidx:");
#endif
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
for (;;) {
int mmu_idx = va_arg(argp, int);
if (mmu_idx < 0) {
break;
}
#if defined(DEBUG_TLB)
printf(" %d", mmu_idx);
#endif
memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
}
#if defined(DEBUG_TLB)
printf("\n");
#endif
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
va_list argp;
va_start(argp, cpu);
v_tlb_flush_by_mmuidx(cpu, argp);
va_end(argp);
}
static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->addr_read &
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
@@ -395,6 +420,76 @@ static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
}
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
CPUArchState *env = cpu->env_ptr;
int i, k;
va_list argp;
va_start(argp, addr);
#if defined(DEBUG_TLB)
printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx, addr);
#endif
/* Check if we need to flush due to large pages. */
if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
printf(" forced full flush ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
env->tlb_flush_addr, env->tlb_flush_mask);
#endif
v_tlb_flush_by_mmuidx(cpu, argp);
va_end(argp);
return;
}
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
for (;;) {
int mmu_idx = va_arg(argp, int);
if (mmu_idx < 0) {
break;
}
#if defined(DEBUG_TLB)
printf(" %d", mmu_idx);
#endif
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
/* check whether there are vltb entries that need to be flushed */
for (k = 0; k < CPU_VTLB_SIZE; k++) {
tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
}
}
va_end(argp);
#if defined(DEBUG_TLB)
printf("\n");
#endif
tb_flush_jmp_cache(cpu, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr)
{
cpu_physical_memory_test_and_clear_dirty(uc, ram_addr, TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
void tlb_unprotect_code(CPUState *cpu, ram_addr_t ram_addr)
{
cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE);
}
#define MMUSUFFIX _mmu
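
The new functions take a variable-length list of MMU indexes terminated by any negative value, which v_tlb_flush_by_mmuidx consumes with va_arg; tlb_flush_page_by_mmuidx forwards the same va_list when a large-page hit forces it to fall back to a full flush of the listed indexes. A minimal standalone sketch of that calling convention (illustrative only, not code from this commit):

#include <stdarg.h>
#include <stdio.h>

/* Sketch of the negative-terminated index list used by tlb_flush_by_mmuidx();
   the "cpu_name" argument is just a stand-in for the CPUState pointer. */
static void demo_flush_by_mmuidx(const char *cpu_name, ...)
{
    va_list argp;
    va_start(argp, cpu_name);
    for (;;) {
        int mmu_idx = va_arg(argp, int);
        if (mmu_idx < 0) {
            break;              /* a negative value ends the list */
        }
        printf("%s: flush all TLB entries for MMU index %d\n",
               cpu_name, mmu_idx);
    }
    va_end(argp);
}

int main(void)
{
    demo_flush_by_mmuidx("cpu0", 0, 2, -1);    /* indexes 0 and 2 only */
    return 0;
}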


@@ -3032,7 +3032,9 @@ symbols = (
'thumb2_logic_op',
'ti925t_initfn',
'tlb_add_large_page',
'tlb_flush_by_mmuidx',
'tlb_flush_entry',
'tlb_flush_page_by_mmuidx',
'tlbi_aa64_asid_is_write',
'tlbi_aa64_asid_write',
'tlbi_aa64_vaa_is_write',


@@ -90,8 +90,48 @@ void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
* tlb_flush_page:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
* @flush_global: ignored
*
* Flush the entire TLB for the specified CPU.
* The flush_global flag is in theory an indicator of whether the whole
* TLB should be flushed, or only those entries not marked global.
* In practice QEMU does not implement any global/not global flag for
* TLB entries, and the argument is ignored.
*/
void tlb_flush(CPUState *cpu, int flush_global);
/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
* @...: list of MMU indexes to flush, terminated by a negative value
*
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @...: list of MMU indexes to flush, terminated by a negative value
*
* Flush all entries from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
@@ -109,6 +149,15 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
target_ulong addr, ...)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
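
As a usage sketch of the declarations above (the MMU index constants here are hypothetical, not defined by this commit), a target's TLB-invalidate helpers could flush only the translation regimes they care about rather than the whole TLB:

/* Hypothetical target helpers: the index list must end with a negative value. */
static void helper_invalidate_page(CPUState *cs, target_ulong addr)
{
    /* flush one page for two made-up MMU indexes */
    tlb_flush_page_by_mmuidx(cs, addr, MMU_IDX_KERNEL, MMU_IDX_USER, -1);
}

static void helper_invalidate_user_entries(CPUState *cs)
{
    /* drop every entry belonging to a single made-up MMU index */
    tlb_flush_by_mmuidx(cs, MMU_IDX_USER, -1);
}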


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_m68k
#define ti925t_initfn ti925t_initfn_m68k
#define tlb_add_large_page tlb_add_large_page_m68k
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_m68k
#define tlb_flush_entry tlb_flush_entry_m68k
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_m68k
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_m68k
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_m68k
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_m68k


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_mips
#define ti925t_initfn ti925t_initfn_mips
#define tlb_add_large_page tlb_add_large_page_mips
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips
#define tlb_flush_entry tlb_flush_entry_mips
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_mips64
#define ti925t_initfn ti925t_initfn_mips64
#define tlb_add_large_page tlb_add_large_page_mips64
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64
#define tlb_flush_entry tlb_flush_entry_mips64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_mips64el
#define ti925t_initfn ti925t_initfn_mips64el
#define tlb_add_large_page tlb_add_large_page_mips64el
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64el
#define tlb_flush_entry tlb_flush_entry_mips64el
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64el
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64el
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64el
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64el


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_mipsel
#define ti925t_initfn ti925t_initfn_mipsel
#define tlb_add_large_page tlb_add_large_page_mipsel
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mipsel
#define tlb_flush_entry tlb_flush_entry_mipsel
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mipsel
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mipsel
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mipsel
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mipsel


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_powerpc
#define ti925t_initfn ti925t_initfn_powerpc
#define tlb_add_large_page tlb_add_large_page_powerpc
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_powerpc
#define tlb_flush_entry tlb_flush_entry_powerpc
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_powerpc
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_powerpc
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_powerpc
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_powerpc


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_sparc
#define ti925t_initfn ti925t_initfn_sparc
#define tlb_add_large_page tlb_add_large_page_sparc
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc
#define tlb_flush_entry tlb_flush_entry_sparc
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_sparc64
#define ti925t_initfn ti925t_initfn_sparc64
#define tlb_add_large_page tlb_add_large_page_sparc64
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc64
#define tlb_flush_entry tlb_flush_entry_sparc64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc64
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc64


@@ -3026,7 +3026,9 @@
#define thumb2_logic_op thumb2_logic_op_x86_64
#define ti925t_initfn ti925t_initfn_x86_64
#define tlb_add_large_page tlb_add_large_page_x86_64
#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_x86_64
#define tlb_flush_entry tlb_flush_entry_x86_64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_x86_64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_x86_64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_x86_64
#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_x86_64