exec: Introduce AddressSpaceDispatch.mru_section

Under heavy workloads, the lookup will likely return the same
MemoryRegionSection as last time. Caching the result in a pointer, as
ram_list.mru_block already does for RAM blocks, significantly reduces
the cost of address_space_translate.

During an address space topology update, as->dispatch is reallocated,
so the cached pointer is invalidated automatically.
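The caching pattern in miniature (a self-contained sketch with
hypothetical names such as Dispatch and lookup_slow; this is not the
unicorn code, which follows in the diff below): speculatively reuse the
last result, validate it against the requested address, and fall back
to the slow lookup only on a miss.

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct Section {
        unsigned long start, size;        /* covers [start, start + size) */
    } Section;

    typedef struct Dispatch {
        _Atomic(Section *) mru_section;   /* last section returned, or NULL */
        Section *sections;                /* table scanned by the slow path */
        size_t n_sections;
    } Dispatch;

    /* Hypothetical slow path, standing in for phys_page_find(). */
    static Section *lookup_slow(Dispatch *d, unsigned long addr)
    {
        for (size_t i = 0; i < d->n_sections; i++) {
            if (addr - d->sections[i].start < d->sections[i].size) {
                return &d->sections[i];
            }
        }
        return NULL;
    }

    static Section *lookup(Dispatch *d, unsigned long addr)
    {
        Section *s = atomic_load(&d->mru_section);

        /* Reuse the cached section only if it still covers addr. */
        if (!s || addr - s->start >= s->size) {
            s = lookup_slow(d, addr);     /* cache miss */
            atomic_store(&d->mru_section, s);
        }
        return s;
    }

Because each topology update builds a brand-new dispatch structure, the
cache needs no explicit flush: readers still on the old structure see a
self-consistent mru_section, and the new structure starts with the
field zeroed.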

Perf reports a visible drop in CPU usage, because phys_page_find is no
longer called on cache hits. Before:

2.35% qemu-system-x86_64 [.] phys_page_find
0.97% qemu-system-x86_64 [.] address_space_translate_internal
0.95% qemu-system-x86_64 [.] address_space_translate
0.55% qemu-system-x86_64 [.] address_space_lookup_region

After:

0.97% qemu-system-x86_64 [.] address_space_translate_internal
0.97% qemu-system-x86_64 [.] address_space_lookup_region
0.84% qemu-system-x86_64 [.] address_space_translate

Backports commit 729633c2bc30496073431584eb6e304776b4ebd4 from qemu

@@ -103,6 +103,7 @@ typedef struct PhysPageMap {
 } PhysPageMap;
 
 struct AddressSpaceDispatch {
+    MemoryRegionSection *mru_section;
     /* This is a multi-level map on the physical address space.
      * The bottom level has pointers to MemoryRegionSections.
      */
@@ -300,17 +301,28 @@ bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr)
 }
 
 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
-        hwaddr addr,
-        bool resolve_subpage)
+                                                        hwaddr addr,
+                                                        bool resolve_subpage)
 {
-    MemoryRegionSection *section;
+    MemoryRegionSection *section = atomic_read(&d->mru_section);
     subpage_t *subpage;
+    bool update;
 
-    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
+    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
+        section_covers_addr(section, addr)) {
+        update = false;
+    } else {
+        section = phys_page_find(d->phys_map, addr, d->map.nodes,
+                                 d->map.sections);
+        update = true;
+    }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
     }
+    if (update) {
+        atomic_set(&d->mru_section, section);
+    }
     return section;
 }
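Two details of the hunk are worth noting. First, mru_section is read
and written with atomic_read/atomic_set so that concurrent lookups can
race on it without locking; the worst case is a stale or overwritten
cache entry, which costs only an extra phys_page_find, since any cached
pointer still refers into this dispatch's own sections table and is
re-validated before use. Second, that validation uses
section_covers_addr(), a helper added by the same upstream commit but
outside this hunk. From memory of the upstream commit, its check has
roughly this shape; treat the exact field accesses (size.hi/size.lo
from the two-word Int128 representation of that era) as assumptions:

    static inline bool section_covers_addr(const MemoryRegionSection *section,
                                           hwaddr addr)
    {
        /* Memory topology clips a memory region to [0, 2^64); size.hi > 0
         * means the section covers the entire address space.
         */
        return section->size.hi ||
               range_covers_byte(section->offset_within_address_space,
                                 section->size.lo, addr);
    }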