Mirror of https://github.com/yuzu-emu/unicorn.git

exec: Drop unnecessary code for unicorn

The dirty memory code isn't strictly necessary.

parent b28c64ed34
commit a81439c7ca
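Note (editorial, not part of the commit): QEMU's dirty-memory machinery tracks guest writes for several clients (migration, VGA, and self-modifying-code detection). Unicorn never migrates a guest or emulates a display, which is why most of this bookkeeping can go. The sketch below uses a hypothetical function name, but the helper signatures are the ones that appear in the diff; it illustrates the one job a write to RAM still has to do: invalidate any translated code on the target page.

/* Hypothetical sketch, not code from this commit: with the migration and
 * VGA dirty clients gone, a store that hits RAM only needs to throw away
 * translations covering that page before execution continues. */
static void notdirty_write_sketch(struct uc_struct *uc, hwaddr ram_addr,
                                  unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(uc, ram_addr, DIRTY_MEMORY_CODE)) {
        /* Page still holds translated code: invalidate those translations. */
        tb_invalidate_phys_page_fast(uc, ram_addr, size);
    }
}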
@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_aarch64
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_aarch64
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64
#define cpu_physical_memory_map cpu_physical_memory_map_aarch64
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_aarch64
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64
#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_aarch64
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_aarch64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64
#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64
#define tlb_protect_code tlb_protect_code_aarch64
#define tlb_reset_dirty tlb_reset_dirty_aarch64
#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64
#define tlb_set_dirty tlb_set_dirty_aarch64
#define tlb_set_page tlb_set_page_aarch64
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_aarch64
#define tlb_unprotect_code tlb_unprotect_code_aarch64
#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_aarch64eb
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_aarch64eb
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64eb
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64eb
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64eb
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64eb
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64eb
#define cpu_physical_memory_map cpu_physical_memory_map_aarch64eb
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_aarch64eb
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64eb
#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64eb
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64eb
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64eb
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_aarch64eb
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64eb
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64eb
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64eb

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_aarch64eb
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64eb
#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64eb
#define tlb_protect_code tlb_protect_code_aarch64eb
#define tlb_reset_dirty tlb_reset_dirty_aarch64eb
#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64eb
#define tlb_set_dirty tlb_set_dirty_aarch64eb
#define tlb_set_page tlb_set_page_aarch64eb
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_aarch64eb
#define tlb_unprotect_code tlb_unprotect_code_aarch64eb
#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64eb
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64eb
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64eb

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_arm
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_arm
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_arm
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_arm
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_arm
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_arm
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_arm
#define cpu_physical_memory_map cpu_physical_memory_map_arm
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_arm
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_arm
#define cpu_physical_memory_rw cpu_physical_memory_rw_arm
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_arm
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_arm
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_arm
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_arm
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_arm
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_arm

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_arm
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_arm
#define tlb_is_dirty_ram tlb_is_dirty_ram_arm
#define tlb_protect_code tlb_protect_code_arm
#define tlb_reset_dirty tlb_reset_dirty_arm
#define tlb_reset_dirty_range tlb_reset_dirty_range_arm
#define tlb_set_dirty tlb_set_dirty_arm
#define tlb_set_page tlb_set_page_arm
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_arm
#define tlb_unprotect_code tlb_unprotect_code_arm
#define tlb_vaddr_to_host tlb_vaddr_to_host_arm
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_arm
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_arm

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_armeb
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_armeb
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_armeb
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_armeb
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_armeb
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_armeb
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_armeb
#define cpu_physical_memory_map cpu_physical_memory_map_armeb
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_armeb
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_armeb
#define cpu_physical_memory_rw cpu_physical_memory_rw_armeb
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_armeb
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_armeb
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_armeb
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_armeb
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_armeb
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_armeb

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_armeb
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_armeb
#define tlb_is_dirty_ram tlb_is_dirty_ram_armeb
#define tlb_protect_code tlb_protect_code_armeb
#define tlb_reset_dirty tlb_reset_dirty_armeb
#define tlb_reset_dirty_range tlb_reset_dirty_range_armeb
#define tlb_set_dirty tlb_set_dirty_armeb
#define tlb_set_page tlb_set_page_armeb
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_armeb
#define tlb_unprotect_code tlb_unprotect_code_armeb
#define tlb_vaddr_to_host tlb_vaddr_to_host_armeb
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_armeb
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_armeb
@@ -249,9 +249,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
               || memory_region_is_romd(section->mr)) {
        /* Write access calls the I/O callback. */
        te->addr_write = address | TLB_MMIO;
    } else if (memory_region_is_ram(section->mr)
               && cpu_physical_memory_is_clean(cpu->uc,
                      memory_region_get_ram_addr(section->mr) + xlat)) {
    } else if (memory_region_is_ram(section->mr)) {
        te->addr_write = address | TLB_NOTDIRTY;
    } else {
        te->addr_write = address;

@@ -452,21 +450,6 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(uc, ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(CPUState *cpu, ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr, int size)
qemu/exec.c
@@ -886,65 +886,6 @@ found:
    return block;
}

static void tlb_reset_dirty_range_all(struct uc_struct* uc,
                                      ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(uc, start);
    assert(block == qemu_get_ram_block(uc, end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    tlb_reset_dirty(uc->cpu, start1, length);
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(struct uc_struct *uc,
                                              ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    // Unicorn: commented out
    //rcu_read_lock();

    // Unicorn: atomic_read instead of atomic_rcu_read used
    blocks = atomic_read(&uc->ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    // Unicorn: commented out
    //rcu_read_unlock();

    if (dirty && tcg_enabled(uc)) {
        tlb_reset_dirty_range_all(uc, start, length);
    }

    return dirty;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
@@ -1238,10 +1179,8 @@ int qemu_ram_resize(struct uc_struct *uc, RAMBlock *block, ram_addr_t newsize, E
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(uc, block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(uc, block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);

    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);

@@ -1249,51 +1188,6 @@ int qemu_ram_resize(struct uc_struct *uc, RAMBlock *block, ram_addr_t newsize, E
    return 0;
}

/* Called with ram_list.mutex held */
static void dirty_memory_extend(struct uc_struct *uc,
                                ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        // Unicorn: atomic_read used instead of atomic_rcu_read
        old_blocks = atomic_read(&uc->ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);
        // Unicorn: unicorn-specific variable to make memory handling less painful.
        new_blocks->num_blocks = new_num_blocks;

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        // Unicorn: atomic_set used instead of atomic_rcu_set
        atomic_set(&uc->ram_list.dirty_memory[i], new_blocks);

        // Unicorn: g_free used instead of g_free_rcu
        g_free(old_blocks);
    }
}

static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
@ -1318,11 +1212,7 @@ static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block, Error **err
|
|||
|
||||
new_ram_size = MAX(old_ram_size,
|
||||
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
|
||||
if (new_ram_size > old_ram_size) {
|
||||
// Unicorn: commented out
|
||||
//migration_bitmap_extend(old_ram_size, new_ram_size);
|
||||
dirty_memory_extend(uc, old_ram_size, new_ram_size);
|
||||
}
|
||||
|
||||
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
|
||||
* QLIST (which has an RCU-friendly variant) does not have insertion at
|
||||
* tail, so save the last element in last_block.
|
||||
|
@ -1346,10 +1236,6 @@ static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block, Error **err
|
|||
smp_wmb();
|
||||
uc->ram_list.version++;
|
||||
|
||||
cpu_physical_memory_set_dirty_range(uc, new_block->offset,
|
||||
new_block->used_length,
|
||||
DIRTY_CLIENTS_ALL);
|
||||
|
||||
if (new_block->host) {
|
||||
qemu_ram_setup_dump(new_block->host, new_block->max_length);
|
||||
// Unicorn: commented out
|
||||
|
@ -1756,9 +1642,6 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
|
|||
static void notdirty_mem_write(struct uc_struct* uc, void *opaque, hwaddr ram_addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
if (!cpu_physical_memory_get_dirty_flag(uc, ram_addr, DIRTY_MEMORY_CODE)) {
|
||||
tb_invalidate_phys_page_fast(uc, ram_addr, size);
|
||||
}
|
||||
switch (size) {
|
||||
case 1:
|
||||
stb_p(qemu_map_ram_ptr(uc, NULL, ram_addr), val);
|
||||
|
@ -1772,11 +1655,6 @@ static void notdirty_mem_write(struct uc_struct* uc, void *opaque, hwaddr ram_ad
|
|||
default:
|
||||
abort();
|
||||
}
|
||||
/* we remove the notdirty callback only if the code has been
|
||||
flushed */
|
||||
if (!cpu_physical_memory_is_clean(uc, ram_addr)) {
|
||||
tlb_set_dirty(uc->current_cpu, uc->current_cpu->mem_io_vaddr);
|
||||
}
|
||||
}
|
||||
|
||||
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
|
||||
|
@@ -1974,23 +1852,6 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(mr->uc, addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(mr->uc, addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(mr->uc, addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

@@ -2072,7 +1933,6 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        /* Unicorn: commented out

@@ -2271,7 +2131,6 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);

@@ -2431,9 +2290,6 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,

        mr = memory_region_from_host(as->uc, buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        memory_region_unref(mr);
        return;
    }

@@ -2466,7 +2322,7 @@ void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len,
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->uc, (mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define INVALIDATE(mr, ofs, len)
#define RCU_READ_LOCK(...) rcu_read_lock()
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
#include "memory_ldst.inc.c"

@@ -2556,7 +2412,7 @@ void address_space_cache_destroy(MemoryRegionCache *cache)
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write) true
#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->uc, (mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define INVALIDATE(mr, ofs, len)
#define RCU_READ_LOCK() //rcu_read_lock()
#define RCU_READ_UNLOCK() //rcu_read_unlock()
#include "memory_ldst.inc.c"
@@ -315,17 +315,12 @@ symbols = (
    'cpu_outw',
    'cpu_physical_memory_all_dirty',
    'cpu_physical_memory_clear_dirty_range',
    'cpu_physical_memory_get_dirty',
    'cpu_physical_memory_get_dirty_flag',
    'cpu_physical_memory_is_clean',
    'cpu_physical_memory_is_io',
    'cpu_physical_memory_map',
    'cpu_physical_memory_range_includes_clean',
    'cpu_physical_memory_reset_dirty',
    'cpu_physical_memory_rw',
    'cpu_physical_memory_set_dirty_flag',
    'cpu_physical_memory_set_dirty_range',
    'cpu_physical_memory_test_and_clear_dirty',
    'cpu_physical_memory_unmap',
    'cpu_physical_memory_write_rom',
    'cpu_physical_memory_write_rom_internal',

@@ -2938,13 +2933,11 @@ symbols = (
    'tlb_flush_page',
    'tlb_flush_page_by_mmuidx',
    'tlb_is_dirty_ram',
    'tlb_protect_code',
    'tlb_reset_dirty',
    'tlb_reset_dirty_range',
    'tlb_set_dirty',
    'tlb_set_page',
    'tlb_set_page_with_attrs',
    'tlb_unprotect_code',
    'tlb_vaddr_to_host',
    'tlbi_aa64_asid_is_write',
    'tlbi_aa64_asid_write',
@@ -21,8 +21,6 @@

#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr);
void tlb_unprotect_code(CPUState *cpu, ram_addr_t ram_addr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                           uintptr_t start, uintptr_t length);
//extern int tlb_flush_count;
@@ -157,17 +157,6 @@ static inline bool cpu_physical_memory_all_dirty(struct uc_struct *uc, ram_addr_
    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(struct uc_struct *uc, ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(uc, addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(struct uc_struct *uc, ram_addr_t addr)
{
    return !cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_CODE);
}

static inline bool cpu_physical_memory_range_includes_clean(struct uc_struct *uc, ram_addr_t start,
                                                            ram_addr_t length, uint8_t mask)
{

@@ -204,136 +193,5 @@ static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_
    //rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    // Unicorn: commented out
    //rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        // Unicorn: atomic_read used instead of atomic_rcu_read
        blocks[i] = atomic_read(&uc->ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    // Unicorn: commented out
    //rcu_read_unlock();
}

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(struct uc_struct *uc, unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        // Unicorn: commented out
        //rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            // Unicorn: atomic_read used instead of atomic_rcu_read
            blocks[i] = atomic_read(&uc->ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                if (tcg_enabled(uc)) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        // Unicorn: commented out
        //rcu_read_unlock();
    } else {
        uint8_t clients = tcg_enabled(uc) ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(uc, ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(struct uc_struct *uc,
                                              ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(struct uc_struct *uc, ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(uc, start, length, DIRTY_MEMORY_CODE);
}

#endif
#endif
@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_m68k
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_m68k
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_m68k
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_m68k
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_m68k
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_m68k
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_m68k
#define cpu_physical_memory_map cpu_physical_memory_map_m68k
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_m68k
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_m68k
#define cpu_physical_memory_rw cpu_physical_memory_rw_m68k
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_m68k
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_m68k
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_m68k
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_m68k
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_m68k
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_m68k

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_m68k
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_m68k
#define tlb_is_dirty_ram tlb_is_dirty_ram_m68k
#define tlb_protect_code tlb_protect_code_m68k
#define tlb_reset_dirty tlb_reset_dirty_m68k
#define tlb_reset_dirty_range tlb_reset_dirty_range_m68k
#define tlb_set_dirty tlb_set_dirty_m68k
#define tlb_set_page tlb_set_page_m68k
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_m68k
#define tlb_unprotect_code tlb_unprotect_code_m68k
#define tlb_vaddr_to_host tlb_vaddr_to_host_m68k
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_m68k
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_m68k
@@ -376,7 +376,6 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    // Unicorn: commented out
    //bool release_lock = false;

@@ -391,11 +390,6 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    } else {
        ptr = MAP_RAM(mr, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(mr->uc, memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_mips
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_mips
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips
#define cpu_physical_memory_map cpu_physical_memory_map_mips
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips
#define cpu_physical_memory_rw cpu_physical_memory_rw_mips
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_mips
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips
#define tlb_is_dirty_ram tlb_is_dirty_ram_mips
#define tlb_protect_code tlb_protect_code_mips
#define tlb_reset_dirty tlb_reset_dirty_mips
#define tlb_reset_dirty_range tlb_reset_dirty_range_mips
#define tlb_set_dirty tlb_set_dirty_mips
#define tlb_set_page tlb_set_page_mips
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips
#define tlb_unprotect_code tlb_unprotect_code_mips
#define tlb_vaddr_to_host tlb_vaddr_to_host_mips
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_mips64
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_mips64
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64
#define cpu_physical_memory_map cpu_physical_memory_map_mips64
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64
#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_mips64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64
#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64
#define tlb_protect_code tlb_protect_code_mips64
#define tlb_reset_dirty tlb_reset_dirty_mips64
#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64
#define tlb_set_dirty tlb_set_dirty_mips64
#define tlb_set_page tlb_set_page_mips64
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64
#define tlb_unprotect_code tlb_unprotect_code_mips64
#define tlb_vaddr_to_host tlb_vaddr_to_host_mips64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_mips64el
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_mips64el
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64el
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64el
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64el
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64el
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64el
#define cpu_physical_memory_map cpu_physical_memory_map_mips64el
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64el
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64el
#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64el
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64el
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64el
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64el
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64el
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64el
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64el

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_mips64el
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64el
#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64el
#define tlb_protect_code tlb_protect_code_mips64el
#define tlb_reset_dirty tlb_reset_dirty_mips64el
#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64el
#define tlb_set_dirty tlb_set_dirty_mips64el
#define tlb_set_page tlb_set_page_mips64el
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64el
#define tlb_unprotect_code tlb_unprotect_code_mips64el
#define tlb_vaddr_to_host tlb_vaddr_to_host_mips64el
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64el
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64el

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_mipsel
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_mipsel
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mipsel
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mipsel
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mipsel
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mipsel
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mipsel
#define cpu_physical_memory_map cpu_physical_memory_map_mipsel
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mipsel
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mipsel
#define cpu_physical_memory_rw cpu_physical_memory_rw_mipsel
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mipsel
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mipsel
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mipsel
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mipsel
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mipsel
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mipsel

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_mipsel
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mipsel
#define tlb_is_dirty_ram tlb_is_dirty_ram_mipsel
#define tlb_protect_code tlb_protect_code_mipsel
#define tlb_reset_dirty tlb_reset_dirty_mipsel
#define tlb_reset_dirty_range tlb_reset_dirty_range_mipsel
#define tlb_set_dirty tlb_set_dirty_mipsel
#define tlb_set_page tlb_set_page_mipsel
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mipsel
#define tlb_unprotect_code tlb_unprotect_code_mipsel
#define tlb_vaddr_to_host tlb_vaddr_to_host_mipsel
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mipsel
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mipsel
@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_powerpc
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_powerpc
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_powerpc
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_powerpc
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_powerpc
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_powerpc
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_powerpc
#define cpu_physical_memory_map cpu_physical_memory_map_powerpc
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_powerpc
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_powerpc
#define cpu_physical_memory_rw cpu_physical_memory_rw_powerpc
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_powerpc
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_powerpc
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_powerpc
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_powerpc
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_powerpc
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_powerpc

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_powerpc
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_powerpc
#define tlb_is_dirty_ram tlb_is_dirty_ram_powerpc
#define tlb_protect_code tlb_protect_code_powerpc
#define tlb_reset_dirty tlb_reset_dirty_powerpc
#define tlb_reset_dirty_range tlb_reset_dirty_range_powerpc
#define tlb_set_dirty tlb_set_dirty_powerpc
#define tlb_set_page tlb_set_page_powerpc
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_powerpc
#define tlb_unprotect_code tlb_unprotect_code_powerpc
#define tlb_vaddr_to_host tlb_vaddr_to_host_powerpc
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_powerpc
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_powerpc

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_sparc
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_sparc
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc
#define cpu_physical_memory_map cpu_physical_memory_map_sparc
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc
#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_sparc
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_sparc
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc
#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc
#define tlb_protect_code tlb_protect_code_sparc
#define tlb_reset_dirty tlb_reset_dirty_sparc
#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc
#define tlb_set_dirty tlb_set_dirty_sparc
#define tlb_set_page tlb_set_page_sparc
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc
#define tlb_unprotect_code tlb_unprotect_code_sparc
#define tlb_vaddr_to_host tlb_vaddr_to_host_sparc
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc

@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_sparc64
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_sparc64
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc64
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc64
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc64
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc64
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc64
#define cpu_physical_memory_map cpu_physical_memory_map_sparc64
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc64
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc64
#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc64
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc64
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc64
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc64
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc64
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_sparc64
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc64

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_sparc64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc64
#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc64
#define tlb_protect_code tlb_protect_code_sparc64
#define tlb_reset_dirty tlb_reset_dirty_sparc64
#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc64
#define tlb_set_dirty tlb_set_dirty_sparc64
#define tlb_set_page tlb_set_page_sparc64
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc64
#define tlb_unprotect_code tlb_unprotect_code_sparc64
#define tlb_vaddr_to_host tlb_vaddr_to_host_sparc64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc64
@@ -1271,13 +1271,6 @@ static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb,
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(uc, page_addr);
    }
#endif
}

@@ -1532,7 +1525,6 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    CPUState *cpu = uc->current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif

@@ -1613,7 +1605,6 @@ void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, t
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(cpu, start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
@@ -309,17 +309,12 @@
#define cpu_outw cpu_outw_x86_64
#define cpu_physical_memory_all_dirty cpu_physical_memory_all_dirty_x86_64
#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_x86_64
#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_x86_64
#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_x86_64
#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_x86_64
#define cpu_physical_memory_is_io cpu_physical_memory_is_io_x86_64
#define cpu_physical_memory_map cpu_physical_memory_map_x86_64
#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_x86_64
#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_x86_64
#define cpu_physical_memory_rw cpu_physical_memory_rw_x86_64
#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_x86_64
#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_x86_64
#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_x86_64
#define cpu_physical_memory_unmap cpu_physical_memory_unmap_x86_64
#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_x86_64
#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_x86_64

@@ -2932,13 +2927,11 @@
#define tlb_flush_page tlb_flush_page_x86_64
#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_x86_64
#define tlb_is_dirty_ram tlb_is_dirty_ram_x86_64
#define tlb_protect_code tlb_protect_code_x86_64
#define tlb_reset_dirty tlb_reset_dirty_x86_64
#define tlb_reset_dirty_range tlb_reset_dirty_range_x86_64
#define tlb_set_dirty tlb_set_dirty_x86_64
#define tlb_set_page tlb_set_page_x86_64
#define tlb_set_page_with_attrs tlb_set_page_with_attrs_x86_64
#define tlb_unprotect_code tlb_unprotect_code_x86_64
#define tlb_vaddr_to_host tlb_vaddr_to_host_x86_64
#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_x86_64
#define tlbi_aa64_asid_write tlbi_aa64_asid_write_x86_64
uc.c
@@ -281,23 +281,6 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
        }
    }

static void ramlist_free_dirty_memory(struct uc_struct *uc)
{
    int i;
    DirtyMemoryBlocks** blocks = uc->ram_list.dirty_memory;

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks* block = blocks[i];
        int j;

        for (j = 0; j < block->num_blocks; j++) {
            free(block->blocks[j]);
        }

        free(blocks[i]);
    }
}

static void free_hooks(uc_engine *uc)
{
    struct list_item *cur;

@@ -363,7 +346,6 @@ uc_err uc_close(uc_engine *uc)
    g_hash_table_foreach(uc->type_table, free_table, uc);
    g_hash_table_destroy(uc->type_table);

    ramlist_free_dirty_memory(uc);
    free_hooks(uc);
    free(uc->mapped_blocks);