diff --git a/qemu/target/i386/cpu.h b/qemu/target/i386/cpu.h index 6d2758cc..f985d6ce 100644 --- a/qemu/target/i386/cpu.h +++ b/qemu/target/i386/cpu.h @@ -1751,6 +1751,10 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); +/* the binding language can not catch the exceptions. + check the arguments, return error instead of raise exceptions. */ +int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel); + /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. non zero is returned if the signal was handled by the virtual CPU. */ diff --git a/qemu/target/i386/seg_helper.c b/qemu/target/i386/seg_helper.c index 48035278..154b7814 100644 --- a/qemu/target/i386/seg_helper.c +++ b/qemu/target/i386/seg_helper.c @@ -1513,6 +1513,84 @@ void helper_ltr(CPUX86State *env, int selector) env->tr.selector = selector; } +// Unicorn: check the arguments before run cpu_x86_load_seg(). 
+int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel)
+{
+    int selector;
+    uint32_t e2;
+    int cpl, dpl, rpl;
+    SegmentCache *dt;
+    int index;
+    target_ulong ptr;
+
+    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+        return 0;
+    } else {
+        selector = sel & 0xffff;
+        cpl = env->hflags & HF_CPL_MASK;
+        if ((selector & 0xfffc) == 0) {
+            /* null selector case */
+            if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+                && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+                ) {
+                return UC_ERR_EXCEPTION;
+            }
+            return 0;
+        } else {
+            if (selector & 0x4) {
+                dt = &env->ldt;
+            } else {
+                dt = &env->gdt;
+            }
+            index = selector & ~7;
+            if ((index + 7) > dt->limit) {
+                return UC_ERR_EXCEPTION;
+            }
+            ptr = dt->base + index;
+            e2 = cpu_ldl_kernel(env, ptr + 4);
+
+            if (!(e2 & DESC_S_MASK)) {
+                return UC_ERR_EXCEPTION;
+            }
+            rpl = selector & 3;
+            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+            if (seg_reg == R_SS) {
+                /* must be writable segment */
+                if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+                    return UC_ERR_EXCEPTION;
+                }
+                if (rpl != cpl || dpl != cpl) {
+                    return UC_ERR_EXCEPTION;
+                }
+            } else {
+                /* must be readable segment */
+                if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+                    return UC_ERR_EXCEPTION;
+                }
+
+                if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+                    /* if not conforming code, test rights */
+                    if (dpl < cpl || dpl < rpl) {
+                        return UC_ERR_EXCEPTION;
+                    }
+                }
+            }
+
+            if (!(e2 & DESC_P_MASK)) {
+                /* Segment not present: real hardware raises #SS(sel) for
+                   stack loads and #NP(sel) otherwise; both map onto the
+                   same Unicorn error code, so no branch is needed here. */
+                return UC_ERR_EXCEPTION;
+            }
+
+        }
+    }
+
+    return 0;
+}
+
 /* only works if protected mode and not VM86.
seg_reg must be != R_CS */ void helper_load_seg(CPUX86State *env, int seg_reg, int selector) { diff --git a/qemu/target/i386/unicorn.c b/qemu/target/i386/unicorn.c index 4dd5809b..db2395d9 100644 --- a/qemu/target/i386/unicorn.c +++ b/qemu/target/i386/unicorn.c @@ -807,6 +807,7 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i CPUState *mycpu = uc->cpu; CPUX86State *state = &X86_CPU(uc, mycpu)->env; int i; + int ret; for (i = 0; i < count; i++) { unsigned int regid = regs[i]; @@ -1029,21 +1030,45 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i uc_emu_stop(uc); break; case UC_X86_REG_CS: + ret = uc_check_cpu_x86_load_seg(state, R_CS, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_CS, *(uint16_t *)value); break; case UC_X86_REG_DS: + ret = uc_check_cpu_x86_load_seg(state, R_DS, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_DS, *(uint16_t *)value); break; case UC_X86_REG_SS: + ret = uc_check_cpu_x86_load_seg(state, R_SS, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_SS, *(uint16_t *)value); break; case UC_X86_REG_ES: + ret = uc_check_cpu_x86_load_seg(state, R_ES, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_ES, *(uint16_t *)value); break; case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(state, R_FS, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_FS, *(uint16_t *)value); break; case UC_X86_REG_GS: + ret = uc_check_cpu_x86_load_seg(state, R_GS, *(uint16_t *)value); + if (ret) { + return ret; + } cpu_x86_load_seg(state, R_GS, *(uint16_t *)value); break; case UC_X86_REG_IDTR: @@ -1243,9 +1268,17 @@ int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i state->segs[R_ES].selector = *(uint16_t *)value; break; case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(state, R_FS, *(uint16_t *)value); + if (ret) { + return 
ret;
+            }
             cpu_x86_load_seg(state, R_FS, *(uint16_t *)value);
             break;
         case UC_X86_REG_GS:
+            ret = uc_check_cpu_x86_load_seg(state, R_GS, *(uint16_t *)value);
+            if (ret) {
+                return ret;
+            }
             cpu_x86_load_seg(state, R_GS, *(uint16_t *)value);
             break;
         case UC_X86_REG_R8:
diff --git a/uc.c b/uc.c
index d20b7e52..82583712 100644
--- a/uc.c
+++ b/uc.c
@@ -395,12 +395,15 @@ uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count)
 UNICORN_EXPORT
 uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
 {
-    if (uc->reg_write)
-        uc->reg_write(uc, (unsigned int *)ids, vals, count);
-    else
-        return -1; // FIXME: need a proper uc_err
+    int ret = UC_ERR_OK;
 
-    return UC_ERR_OK;
+    if (uc->reg_write) {
+        ret = uc->reg_write(uc, (unsigned int *)ids, vals, count);
+    } else {
+        return UC_ERR_EXCEPTION; // FIXME: need a proper uc_err
+    }
+
+    return ret;
 }