mirror of https://github.com/yuzu-emu/unicorn.git
tcg: move locking for tb_invalidate_phys_page_range up
In the linux-user case all things that involve 'l1_map' and PageDesc tweaks are protected by the memory lock (mmap_lock). For SoftMMU mode we previously relied on single-threaded behaviour; with MTTCG we now use the tb_lock().

As a result we need to do a little re-factoring and push the taking of this lock up the call tree. This requires a slightly different entry for the SoftMMU and user-mode cases from tb_invalidate_phys_range.

This also means user-mode breakpoint insertion needs to take two locks, but it hadn't taken any previously, so this is an improvement.

Backports commit ba051fb5e56d5ff5e4fa672d37954452e58543b2 from qemu
This commit is contained in:
parent 9d64a89acf
commit da124da4b1
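The diff below pushes lock acquisition up the call tree and splits tb_invalidate_phys_range into a public entry point plus a helper (tb_invalidate_phys_range_1) that assumes the locks are already held. As a rough, self-contained illustration of that pattern only, the sketch below uses plain pthread mutexes as stand-ins for mmap_lock and tb_lock; every name in it (page_lock, tb_lock_, invalidate_range, invalidate_range_1, SOFTMMU_BUILD) is hypothetical and is not a QEMU or Unicorn identifier:

/* Minimal sketch of the "push locking up the call tree" refactoring.
 * page_lock stands in for mmap_lock, tb_lock_ for tb_lock, and the
 * SOFTMMU_BUILD macro for CONFIG_SOFTMMU.  Illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ mmap_lock */
static pthread_mutex_t tb_lock_  = PTHREAD_MUTEX_INITIALIZER;  /* ~ tb_lock   */

/* Helper that assumes the caller already holds the required locks,
 * mirroring the new tb_invalidate_phys_range_1(). */
static void invalidate_range_1(unsigned long start, unsigned long end)
{
    printf("invalidating [0x%lx, 0x%lx)\n", start, end);
}

#ifdef SOFTMMU_BUILD
/* System-mode entry point: the caller must already hold tb_lock. */
static void invalidate_range(unsigned long start, unsigned long end)
{
    invalidate_range_1(start, end);
}
#else
/* User-mode entry point: the caller holds the memory lock and this
 * function grabs tb_lock itself, as the new tb_invalidate_phys_range does. */
static void invalidate_range(unsigned long start, unsigned long end)
{
    pthread_mutex_lock(&tb_lock_);
    invalidate_range_1(start, end);
    pthread_mutex_unlock(&tb_lock_);
}
#endif

int main(void)
{
    /* User-mode breakpoint insertion now nests both locks:
     * memory lock first, tb_lock second (taken inside invalidate_range). */
    pthread_mutex_lock(&page_lock);
    invalidate_range(0x1000, 0x1001);
    pthread_mutex_unlock(&page_lock);
    return 0;
}

Note that in the Unicorn copy of this code the assert/lock calls themselves stay commented out (see the "// Unicorn: commented out" markers in the hunks below); only the entry-point split and the documented caller responsibilities are carried over.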
@@ -524,7 +524,12 @@ void cpu_exec_init(CPUState *cpu, void *opaque)
 #if defined(CONFIG_USER_ONLY)
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 {
+    // Unicorn: commented out
+    mmap_lock();
+    //tb_lock();
     tb_invalidate_phys_page_range(pc, pc + 1, 0);
+    //tb_unlock();
+    mmap_unlock();
 }
 #else
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
@@ -533,6 +538,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
     hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     if (phys != -1) {
+        /* Locks grabbed by tb_invalidate_phys_addr */
         tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                 phys | (pc & ~TARGET_PAGE_MASK));
     }
@@ -1461,8 +1461,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation, grabs tb_lock
+ * Called with tb_lock held for system-mode emulation
  */
-void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
+static void tb_invalidate_phys_range_1(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
 {
     while (start < end) {
         tb_invalidate_phys_page_range(uc, start, end, 0);
@@ -1471,12 +1474,33 @@ void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_pag
     }
 }
 
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
+{
+    // Unicorn: commented out
+    //assert_tb_lock();
+    tb_invalidate_phys_range_1(uc, start, end);
+}
+#else
+void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
+{
+    // Unicorn: commented out
+    //assert_memory_lock();
+    //tb_lock();
+    tb_invalidate_phys_range_1(uc, start, end);
+    //tb_unlock();
+}
+#endif
+
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;end[. NOTE: start and end must refer to the *same* physical page.
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with tb_lock/mmap_lock held for user-mode emulation
+ * Called with tb_lock held for system-mode emulation
  */
 void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
@@ -1498,6 +1522,10 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
     uint32_t current_flags = 0;
 #endif /* TARGET_HAS_PRECISE_SMC */
 
+    // Unicorn: commented out
+    //assert_memory_lock();
+    //assert_tb_lock();
+
     p = page_find(uc, start >> TARGET_PAGE_BITS);
     if (!p) {
         return;
@@ -1574,7 +1602,10 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
 }
 
 #ifdef CONFIG_SOFTMMU
-/* len must be <= 8 and start must be a multiple of len */
+/* len must be <= 8 and start must be a multiple of len.
+ * Called via softmmu_template.h when code areas are written to with
+ * tb_lock held.
+ */
 void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len)
 {
     PageDesc *p;