cputlb: Handle watchpoints via TLB_WATCHPOINT

The raising of exceptions from check_watchpoint, buried inside
of the I/O subsystem, is fundamentally broken. We do not have
the helper return address with which we can unwind guest state.

Replace PHYS_SECTION_WATCH and io_mem_watch with TLB_WATCHPOINT.
Move the call to cpu_check_watchpoint into the cputlb helpers
where we do have the helper return address.

This allows watchpoints on RAM to bypass the full I/O access path.

Backports commit 50b107c5d617eaf93301cef20221312e7a986701 from qemu
This commit is contained in:
Richard Henderson 2020-01-14 06:58:26 -05:00 committed by Lioncash
parent 6c4a3fd06f
commit 07f30382c0
20 changed files with 190 additions and 35 deletions

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64
#define cpu_can_do_io cpu_can_do_io_aarch64
#define cpu_can_run cpu_can_run_aarch64
#define cpu_check_watchpoint cpu_check_watchpoint_aarch64
#define cpu_class_init cpu_class_init_aarch64
#define cpu_common_class_by_name cpu_common_class_by_name_aarch64
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64eb
#define cpu_can_do_io cpu_can_do_io_aarch64eb
#define cpu_can_run cpu_can_run_aarch64eb
#define cpu_check_watchpoint cpu_check_watchpoint_aarch64eb
#define cpu_class_init cpu_class_init_aarch64eb
#define cpu_common_class_by_name cpu_common_class_by_name_aarch64eb
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64eb

View file

@ -359,6 +359,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
target_ulong vaddr_page;
unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
int asidx = cpu_asidx_from_attrs(cpu, attrs);
int wp_flags;
if (size <= TARGET_PAGE_SIZE) {
sz = TARGET_PAGE_SIZE;
@ -399,6 +400,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
code_address = address;
iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
paddr_page, xlat, prot, &address);
wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
TARGET_PAGE_SIZE);
index = tlb_index(env, mmu_idx, vaddr_page);
te = tlb_entry(env, mmu_idx, vaddr_page);
@ -425,6 +428,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
te->addend = addend - vaddr_page;
if (prot & PAGE_READ) {
te->addr_read = address;
if (wp_flags & BP_MEM_READ) {
te->addr_read |= TLB_WATCHPOINT;
}
} else {
te->addr_read = -1;
}
@ -434,6 +440,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
} else {
te->addr_code = -1;
}
te->addr_write = -1;
if (prot & PAGE_WRITE) {
if ((memory_region_is_ram(section->mr) && section->readonly)
|| memory_region_is_romd(section->mr)) {
@ -444,8 +452,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
} else {
te->addr_write = address;
}
} else {
te->addr_write = -1;
if (wp_flags & BP_MEM_WRITE) {
te->addr_write |= TLB_WATCHPOINT;
}
}
}
@ -991,14 +1000,33 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
tlb_addr &= ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;
/* For anything that is unaligned, recurse through full_load. */
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
return io_readx(env, &env->iotlb[mmu_idx][index],
mmu_idx, addr, retaddr, access_type, op);
iotlbentry = &env->iotlb[mmu_idx][index];
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_READ, retaddr);
/* The backing page may or may not require I/O. */
tlb_addr &= ~TLB_WATCHPOINT;
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
goto do_aligned_access;
}
}
/* Handle I/O access. */
return io_readx(env, iotlbentry, mmu_idx, addr,
retaddr, access_type, op);
}
/* Handle slow unaligned access (it spans two pages or IO). */
@ -1026,6 +1054,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
goto finished;
}
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
switch (op) {
case MO_UB:
@ -1289,14 +1318,32 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;
/* For anything that is unaligned, recurse through byte stores. */
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
val, addr, retaddr, op);
iotlbentry = &env->iotlb[mmu_idx][index];
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
/* The backing page may or may not require I/O. */
tlb_addr &= ~TLB_WATCHPOINT;
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
goto do_aligned_access;
}
}
/* Handle I/O access. */
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
return;
}
@ -1321,10 +1368,29 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
index2 = tlb_index(env, mmu_idx, page2);
entry2 = tlb_entry(env, mmu_idx, page2);
tlb_addr2 = tlb_addr_write(entry2);
if (!tlb_hit_page(tlb_addr2, page2)
&& !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
mmu_idx, retaddr);
if (!tlb_hit_page(tlb_addr2, page2)) {
if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
mmu_idx, retaddr);
index2 = tlb_index(env, mmu_idx, page2);
entry2 = tlb_entry(env, mmu_idx, page2);
}
tlb_addr2 = tlb_addr_write(entry2);
}
/*
* Handle watchpoints. Since this may trap, all checks
* must happen before any store.
*/
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
env->iotlb[mmu_idx][index].attrs,
BP_MEM_WRITE, retaddr);
}
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), page2, size2,
env->iotlb[mmu_idx][index2].attrs,
BP_MEM_WRITE, retaddr);
}
/*
@ -1346,6 +1412,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
return;
}
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
switch (op) {
case MO_UB:

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_arm
#define cpu_can_do_io cpu_can_do_io_arm
#define cpu_can_run cpu_can_run_arm
#define cpu_check_watchpoint cpu_check_watchpoint_arm
#define cpu_class_init cpu_class_init_arm
#define cpu_common_class_by_name cpu_common_class_by_name_arm
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_arm

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_armeb
#define cpu_can_do_io cpu_can_do_io_armeb
#define cpu_can_run cpu_can_run_armeb
#define cpu_check_watchpoint cpu_check_watchpoint_armeb
#define cpu_class_init cpu_class_init_armeb
#define cpu_common_class_by_name cpu_common_class_by_name_armeb
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_armeb

View file

@ -138,7 +138,6 @@ typedef struct subpage_t {
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void memory_map_init(struct uc_struct *uc);
static void tcg_commit(MemoryListener *listener);
@ -735,6 +734,16 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
{
return -ENOSYS;
}
/*
 * Stub for builds without watchpoint support (the #ifdef branch in
 * which cpu_watchpoint_insert returns -ENOSYS): checking a watchpoint
 * is a no-op here — it never traps and never longjmps.
 */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs atr, int fl, uintptr_t ra)
{
}
/*
 * Stub for builds without watchpoint support: no watchpoint can ever
 * match, so report no BP_* flags for any address range.
 */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
return 0;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
@ -809,9 +818,8 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
* partially or completely with the address range covered by the
* access).
*/
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr,
vaddr len)
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
@ -824,8 +832,82 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
return !(addr > wpend || wp->vaddr > addrend);
}
/*
 * Return the union of BP_* flags of all watchpoints that overlap the
 * range [addr, addr + len).
 *
 * Used by tlb_set_page_with_attrs to decide whether TLB_WATCHPOINT
 * must be set on the read and/or write TLB entries for a page.
 */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;
    int ret = 0;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        /*
         * Fix: honor the caller-supplied length instead of always
         * checking a full TARGET_PAGE_SIZE window, which ignored the
         * len parameter entirely (matches upstream qemu commit
         * 56ad8b007dde).  The visible caller passes TARGET_PAGE_SIZE,
         * so its behavior is unchanged.
         */
        if (watchpoint_address_matches(wp, addr, len)) {
            ret |= wp->flags;
        }
    }
    return ret;
}
#endif
/*
 * Generate a debug exception if a watchpoint has been hit.
 *
 * Called from the cputlb helpers with the helper return address @ra so
 * guest state can be unwound before the exception is raised.  If a
 * watchpoint in [addr, addr + len) matches @flags (BP_MEM_READ /
 * BP_MEM_WRITE) and is not vetoed, this exits the cpu loop via
 * cpu_loop_exit_restore() or cpu_loop_exit_noexc() and does not
 * return; otherwise it returns normally.
 */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu);
CPUWatchpoint *wp;
assert(tcg_enabled(cpu->uc));
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
//qemu_mutex_lock_iothread();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
//qemu_mutex_unlock_iothread();
return;
}
/* Let the target's hook adjust the address before matching. */
addr = cc->adjust_watchpoint_address(cpu, addr, len);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)
&& (wp->flags & flags)) {
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
/* Record where, within the access, the watchpoint fired. */
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
/* Architectural (BP_CPU) watchpoints may veto the hit. */
if (wp->flags & BP_CPU &&
!cc->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
/* NOTE(review): tb_check_watchpoint presumably invalidates
the current TB under mmap_lock — confirm. */
mmap_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
/* Stop before the access: raise EXCP_DEBUG now,
unwinding to @ra. */
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags(cpu->uc);
mmap_unlock();
/* Only unwind when a helper return address was given. */
if (ra) {
cpu_restore_state(cpu, ra, true);
}
cpu_loop_exit_noexc(cpu);
}
}
} else {
/* Not a hit for this access: clear any stale hit flags. */
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint)
@ -965,7 +1047,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
target_ulong *address)
{
hwaddr iotlb;
CPUWatchpoint *wp;
if (memory_region_is_ram(section->mr)) {
/* Normal RAM. */
@ -984,19 +1065,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
iotlb += xlat;
}
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = PHYS_SECTION_WATCH + paddr;
*address |= TLB_MMIO;
break;
}
}
}
return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
@ -1737,8 +1805,6 @@ static void io_mem_init(struct uc_struct* uc)
NULL, UINT64_MAX);
memory_region_init_io(uc, &uc->io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
NULL, UINT64_MAX);
//memory_region_init_io(uc, &uc->io_mem_watch, NULL, &watch_mem_ops, NULL,
// NULL, UINT64_MAX);
}
static subpage_t *subpage_init(FlatView *fv, hwaddr base)
@ -1803,8 +1869,6 @@ AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView
assert(n == PHYS_SECTION_NOTDIRTY);
n = dummy_section(&d->map, fv, &uc->io_mem_rom);
assert(n == PHYS_SECTION_ROM);
// n = dummy_section(&d->map, fv, &uc->io_mem_watch);
// assert(n == PHYS_SECTION_WATCH);
d->phys_map = ppe;

View file

@ -269,6 +269,7 @@ symbols = (
'cpu_breakpoint_remove_by_ref',
'cpu_can_do_io',
'cpu_can_run',
'cpu_check_watchpoint',
'cpu_class_init',
'cpu_common_class_by_name',
'cpu_common_exec_interrupt',

View file

@ -330,11 +330,14 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
@ -427,4 +430,8 @@ static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
return &arch_cpu->neg;
}
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs atr, int fl, uintptr_t ra);
#endif /* CPU_ALL_H */

View file

@ -219,6 +219,7 @@ struct CPUWatchpoint {
vaddr vaddr;
vaddr len;
vaddr hitaddr;
MemTxAttrs hitattrs;
int flags; /* BP_* */
QTAILQ_ENTRY(CPUWatchpoint) entry;
};

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_m68k
#define cpu_can_do_io cpu_can_do_io_m68k
#define cpu_can_run cpu_can_run_m68k
#define cpu_check_watchpoint cpu_check_watchpoint_m68k
#define cpu_class_init cpu_class_init_m68k
#define cpu_common_class_by_name cpu_common_class_by_name_m68k
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_m68k

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips
#define cpu_can_do_io cpu_can_do_io_mips
#define cpu_can_run cpu_can_run_mips
#define cpu_check_watchpoint cpu_check_watchpoint_mips
#define cpu_class_init cpu_class_init_mips
#define cpu_common_class_by_name cpu_common_class_by_name_mips
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64
#define cpu_can_do_io cpu_can_do_io_mips64
#define cpu_can_run cpu_can_run_mips64
#define cpu_check_watchpoint cpu_check_watchpoint_mips64
#define cpu_class_init cpu_class_init_mips64
#define cpu_common_class_by_name cpu_common_class_by_name_mips64
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64el
#define cpu_can_do_io cpu_can_do_io_mips64el
#define cpu_can_run cpu_can_run_mips64el
#define cpu_check_watchpoint cpu_check_watchpoint_mips64el
#define cpu_class_init cpu_class_init_mips64el
#define cpu_common_class_by_name cpu_common_class_by_name_mips64el
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64el

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mipsel
#define cpu_can_do_io cpu_can_do_io_mipsel
#define cpu_can_run cpu_can_run_mipsel
#define cpu_check_watchpoint cpu_check_watchpoint_mipsel
#define cpu_class_init cpu_class_init_mipsel
#define cpu_common_class_by_name cpu_common_class_by_name_mipsel
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mipsel

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_powerpc
#define cpu_can_do_io cpu_can_do_io_powerpc
#define cpu_can_run cpu_can_run_powerpc
#define cpu_check_watchpoint cpu_check_watchpoint_powerpc
#define cpu_class_init cpu_class_init_powerpc
#define cpu_common_class_by_name cpu_common_class_by_name_powerpc
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_powerpc

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv32
#define cpu_can_do_io cpu_can_do_io_riscv32
#define cpu_can_run cpu_can_run_riscv32
#define cpu_check_watchpoint cpu_check_watchpoint_riscv32
#define cpu_class_init cpu_class_init_riscv32
#define cpu_common_class_by_name cpu_common_class_by_name_riscv32
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_riscv32

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv64
#define cpu_can_do_io cpu_can_do_io_riscv64
#define cpu_can_run cpu_can_run_riscv64
#define cpu_check_watchpoint cpu_check_watchpoint_riscv64
#define cpu_class_init cpu_class_init_riscv64
#define cpu_common_class_by_name cpu_common_class_by_name_riscv64
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_riscv64

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc
#define cpu_can_do_io cpu_can_do_io_sparc
#define cpu_can_run cpu_can_run_sparc
#define cpu_check_watchpoint cpu_check_watchpoint_sparc
#define cpu_class_init cpu_class_init_sparc
#define cpu_common_class_by_name cpu_common_class_by_name_sparc
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc64
#define cpu_can_do_io cpu_can_do_io_sparc64
#define cpu_can_run cpu_can_run_sparc64
#define cpu_check_watchpoint cpu_check_watchpoint_sparc64
#define cpu_class_init cpu_class_init_sparc64
#define cpu_common_class_by_name cpu_common_class_by_name_sparc64
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc64

View file

@ -263,6 +263,7 @@
#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_x86_64
#define cpu_can_do_io cpu_can_do_io_x86_64
#define cpu_can_run cpu_can_run_x86_64
#define cpu_check_watchpoint cpu_check_watchpoint_x86_64
#define cpu_class_init cpu_class_init_x86_64
#define cpu_common_class_by_name cpu_common_class_by_name_x86_64
#define cpu_common_exec_interrupt cpu_common_exec_interrupt_x86_64