Handle read/write/fetch from unaligned addresses. This adds new error codes UC_ERR_READ_UNALIGNED, UC_ERR_WRITE_UNALIGNED & UC_ERR_FETCH_UNALIGNED.

This commit is contained in:
Nguyen Anh Quynh 2015-09-09 15:52:15 +08:00
parent 6b52be24a3
commit d3d38d3f21
5 changed files with 126 additions and 54 deletions

View file

@@ -50,6 +50,9 @@ const (
ERR_READ_PROT = 14 ERR_READ_PROT = 14
ERR_EXEC_PROT = 15 ERR_EXEC_PROT = 15
ERR_INVAL = 16 ERR_INVAL = 16
ERR_READ_UNALIGNED = 17
ERR_WRITE_UNALIGNED = 18
ERR_FETCH_UNALIGNED = 19
MEM_READ = 16 MEM_READ = 16
MEM_WRITE = 17 MEM_WRITE = 17
MEM_READ_WRITE = 18 MEM_READ_WRITE = 18

View file

@@ -48,6 +48,9 @@ UC_ERR_WRITE_PROT = 13
UC_ERR_READ_PROT = 14 UC_ERR_READ_PROT = 14
UC_ERR_EXEC_PROT = 15 UC_ERR_EXEC_PROT = 15
UC_ERR_INVAL = 16 UC_ERR_INVAL = 16
UC_ERR_READ_UNALIGNED = 17
UC_ERR_WRITE_UNALIGNED = 18
UC_ERR_FETCH_UNALIGNED = 19
UC_MEM_READ = 16 UC_MEM_READ = 16
UC_MEM_WRITE = 17 UC_MEM_WRITE = 17
UC_MEM_READ_WRITE = 18 UC_MEM_READ_WRITE = 18

View file

@@ -122,6 +122,9 @@ typedef enum uc_err {
UC_ERR_READ_PROT, // Quit emulation due to UC_PROT_READ violation: uc_emu_start() UC_ERR_READ_PROT, // Quit emulation due to UC_PROT_READ violation: uc_emu_start()
UC_ERR_EXEC_PROT, // Quit emulation due to UC_PROT_EXEC violation: uc_emu_start() UC_ERR_EXEC_PROT, // Quit emulation due to UC_PROT_EXEC violation: uc_emu_start()
UC_ERR_INVAL, // Inavalid argument provided to uc_xxx function (See specific function API) UC_ERR_INVAL, // Inavalid argument provided to uc_xxx function (See specific function API)
UC_ERR_READ_UNALIGNED, // Unaligned read
UC_ERR_WRITE_UNALIGNED, // Unaligned write
UC_ERR_FETCH_UNALIGNED, // Unaligned fetch
} uc_err; } uc_err;

View file

@@ -177,27 +177,35 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr; uintptr_t haddr;
DATA_TYPE res; DATA_TYPE res;
int mem_access, error_code;
struct uc_struct *uc = env->uc; struct uc_struct *uc = env->uc;
MemoryRegion *mr = memory_mapping(uc, addr); MemoryRegion *mr = memory_mapping(uc, addr);
// memory can be unmapped while reading or fetching
if (mr == NULL) {
#if defined(SOFTMMU_CODE_ACCESS) #if defined(SOFTMMU_CODE_ACCESS)
// Unicorn: callback on fetch from unmapped memory mem_access = UC_MEM_FETCH;
if (mr == NULL) { // memory is not mapped error_code = UC_ERR_MEM_FETCH;
#else
mem_access = UC_MEM_READ;
error_code = UC_ERR_MEM_READ;
#endif
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
uc, UC_MEM_FETCH, addr, DATA_SIZE, 0, uc, mem_access, addr, DATA_SIZE, 0,
uc->hook_callbacks[uc->hook_mem_idx].user_data)) { uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
env->invalid_error = UC_ERR_OK; env->invalid_error = UC_ERR_OK;
mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
} else { } else {
env->invalid_addr = addr; env->invalid_addr = addr;
env->invalid_error = UC_ERR_MEM_FETCH; env->invalid_error = error_code;
// printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr); // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->current_cpu); cpu_exit(uc->current_cpu);
return 0; return 0;
} }
} }
#if defined(SOFTMMU_CODE_ACCESS)
// Unicorn: callback on fetch from NX // Unicorn: callback on fetch from NX
if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -223,22 +231,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
} }
} }
// Unicorn: callback on invalid memory
if (READ_ACCESS_TYPE == MMU_DATA_LOAD && env->uc->hook_mem_idx && mr == NULL) {
if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
// save error & quit
env->invalid_addr = addr;
env->invalid_error = UC_ERR_MEM_READ;
// printf("***** Invalid memory read at " TARGET_FMT_lx "\n", addr);
cpu_exit(env->uc->current_cpu);
return 0;
} else {
env->invalid_error = UC_ERR_OK;
}
}
// Unicorn: callback on non-readable memory // Unicorn: callback on non-readable memory
if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -263,8 +255,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
} }
#endif #endif
if (!VICTIM_TLB_HIT(ADDR_READ)) { if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -307,8 +307,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
unsigned shift; unsigned shift;
do_unaligned_access: do_unaligned_access:
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
#endif #endif
addr1 = addr & ~(DATA_SIZE - 1); addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE; addr2 = addr1 + DATA_SIZE;
@@ -326,8 +334,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Handle aligned access or unaligned access in the same page. */ /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
} }
#endif #endif
@@ -351,27 +367,35 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr; uintptr_t haddr;
DATA_TYPE res; DATA_TYPE res;
int mem_access, error_code;
struct uc_struct *uc = env->uc; struct uc_struct *uc = env->uc;
MemoryRegion *mr = memory_mapping(uc, addr); MemoryRegion *mr = memory_mapping(uc, addr);
// memory can be unmapped while reading or fetching
if (mr == NULL) {
#if defined(SOFTMMU_CODE_ACCESS) #if defined(SOFTMMU_CODE_ACCESS)
// Unicorn: callback on fetch from unmapped memory mem_access = UC_MEM_FETCH;
if (mr == NULL) { // memory is not mapped error_code = UC_ERR_MEM_FETCH;
#else
mem_access = UC_MEM_READ;
error_code = UC_ERR_MEM_READ;
#endif
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
uc, UC_MEM_FETCH, addr, DATA_SIZE, 0, uc, mem_access, addr, DATA_SIZE, 0,
uc->hook_callbacks[uc->hook_mem_idx].user_data)) { uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
env->invalid_error = UC_ERR_OK; env->invalid_error = UC_ERR_OK;
mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
} else { } else {
env->invalid_addr = addr; env->invalid_addr = addr;
env->invalid_error = UC_ERR_MEM_FETCH; env->invalid_error = error_code;
// printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr); // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->current_cpu); cpu_exit(uc->current_cpu);
return 0; return 0;
} }
} }
#if defined(SOFTMMU_CODE_ACCESS)
// Unicorn: callback on fetch from NX // Unicorn: callback on fetch from NX
if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -397,22 +421,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
} }
} }
// Unicorn: callback on invalid memory
if (READ_ACCESS_TYPE == MMU_DATA_LOAD && env->uc->hook_mem_idx && mr == NULL) {
if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
// save error & quit
env->invalid_addr = addr;
env->invalid_error = UC_ERR_MEM_READ;
// printf("***** Invalid memory read at " TARGET_FMT_lx "\n", addr);
cpu_exit(env->uc->current_cpu);
return 0;
} else {
env->invalid_error = UC_ERR_OK;
}
}
// Unicorn: callback on non-readable memory // Unicorn: callback on non-readable memory
if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)( if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -436,8 +444,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
} }
#endif #endif
if (!VICTIM_TLB_HIT(ADDR_READ)) { if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -479,8 +495,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
unsigned shift; unsigned shift;
do_unaligned_access: do_unaligned_access:
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
#endif #endif
addr1 = addr & ~(DATA_SIZE - 1); addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE; addr2 = addr1 + DATA_SIZE;
@@ -498,8 +522,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Handle aligned access or unaligned access in the same page. */ /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
#if defined(SOFTMMU_CODE_ACCESS)
env->invalid_error = UC_ERR_FETCH_UNALIGNED;
#else
env->invalid_error = UC_ERR_READ_UNALIGNED;
#endif
cpu_exit(uc->current_cpu);
return 0;
} }
#endif #endif
@@ -615,8 +647,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); // mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
} }
#endif #endif
if (!VICTIM_TLB_HIT(addr_write)) { if (!VICTIM_TLB_HIT(addr_write)) {
@@ -656,6 +692,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
#endif #endif
/* XXX: not efficient, but simple */ /* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the /* Note: relies on the fact that tlb_fill() does not remove the
@@ -678,6 +718,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
} }
#endif #endif
@@ -751,6 +795,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
} }
#endif #endif
if (!VICTIM_TLB_HIT(addr_write)) { if (!VICTIM_TLB_HIT(addr_write)) {
@@ -790,6 +838,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
#ifdef ALIGNED_ONLY #ifdef ALIGNED_ONLY
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
#endif #endif
/* XXX: not efficient, but simple */ /* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the /* Note: relies on the fact that tlb_fill() does not remove the
@@ -812,6 +864,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & (DATA_SIZE - 1)) != 0) { if ((addr & (DATA_SIZE - 1)) != 0) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr); mmu_idx, retaddr);
env->invalid_addr = addr;
env->invalid_error = UC_ERR_WRITE_UNALIGNED;
cpu_exit(uc->current_cpu);
return;
} }
#endif #endif

7
uc.c
View file

@@ -91,6 +91,13 @@ const char *uc_strerror(uc_err code)
return "Fetch from non-executable memory (UC_ERR_EXEC_PROT)"; return "Fetch from non-executable memory (UC_ERR_EXEC_PROT)";
case UC_ERR_INVAL: case UC_ERR_INVAL:
return "Invalid argumet (UC_ERR_INVAL)"; return "Invalid argumet (UC_ERR_INVAL)";
case UC_ERR_READ_UNALIGNED:
return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)";
case UC_ERR_WRITE_UNALIGNED:
return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)";
case UC_ERR_FETCH_UNALIGNED:
return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)";
} }
} }