| author    | Lorry Tar Creator <lorry-tar-importer@baserock.org> | 2014-03-26 19:21:20 +0000 |
|-----------|------------------------------------------------------|---------------------------|
| committer | <>                                                   | 2014-05-08 15:03:54 +0000 |
| commit    | fb123f93f9f5ce42c8e5785d2f8e0edaf951740e (patch)     |                           |
| tree      | c2103d76aec5f1f10892cd1d3a38e24f665ae5db /src/recompiler/target-i386 |          |
| parent    | 58ed4748338f9466599adfc8a9171280ed99e23f (diff)      |                           |
| download  | VirtualBox-master.tar.gz                             |                           |

Imported from /home/lorry/working-area/delta_VirtualBox/VirtualBox-4.3.10.tar.bz2. (HEAD, VirtualBox-4.3.10, master)
Diffstat (limited to 'src/recompiler/target-i386')
| -rw-r--r-- | src/recompiler/target-i386/cpu.h       |  38 |
| -rw-r--r-- | src/recompiler/target-i386/op_helper.c | 226 |
| -rw-r--r-- | src/recompiler/target-i386/translate.c |   2 |

3 files changed, 211 insertions(+), 55 deletions(-)
```diff
diff --git a/src/recompiler/target-i386/cpu.h b/src/recompiler/target-i386/cpu.h
index b5159bf9..2adbd869 100644
--- a/src/recompiler/target-i386/cpu.h
+++ b/src/recompiler/target-i386/cpu.h
@@ -65,6 +65,7 @@
 # include <VBox/vmm/vmm.h>
 # include <VBox/vmm/stam.h>
 # include <VBox/vmm/cpumctx.h>
+# undef MSR_IA32_APICBASE_BSP
 #endif /* VBOX */
 
 #define R_EAX 0
@@ -115,6 +116,10 @@
 #define DESC_W_MASK     (1 << 9)  /* data: writable */
 #define DESC_TSS_BUSY_MASK (1 << 9)
+#ifdef VBOX
+# define DESC_INTEL_UNUSABLE RT_BIT_32(16+8)      /**< Internal VT-x bit for NULL sectors. */
+# define DESC_RAW_FLAG_BITS  UINT32_C(0x00ffffff) /**< Flag bits we load from the descriptor. */
+#endif
 
 /* eflags masks */
 #define CC_C    0x0001
@@ -909,7 +914,7 @@ typedef struct CPUX86State_Ver16 {
 # define CPU_RAW_RING0            0x0002 /* Set after first time RawR0 is executed, never cleared. */
 # define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
 # define CPU_EMULATE_SINGLE_STEP  0x0080 /* go into single step mode */
-# define CPU_RAW_HWACC            0x0100 /* Set after first time HWACC is executed, never cleared. */
+# define CPU_RAW_HM               0x0100 /* Set after first time HWACC is executed, never cleared. */
 /** @} */
 #endif /* !VBOX */
@@ -930,11 +935,19 @@ void cpu_set_ferr(CPUX86State *s);
 
 /* this function must always be used to load data in the segment cache: it
    synchronizes the hflags with the segment cache values */
+#ifndef VBOX
 static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                           int seg_reg, unsigned int selector,
                                           target_ulong base,
                                           unsigned int limit,
                                           unsigned int flags)
+#else
+static inline void cpu_x86_load_seg_cache_with_clean_flags(CPUX86State *env,
+                                          int seg_reg, unsigned int selector,
+                                          target_ulong base,
+                                          unsigned int limit,
+                                          unsigned int flags)
+#endif
 {
     SegmentCache *sc;
     unsigned int new_hflags;
@@ -943,12 +956,8 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
     sc->selector = selector;
     sc->base = base;
     sc->limit = limit;
-#ifndef VBOX
-    sc->flags = flags;
-#else
-    if (flags & DESC_P_MASK)
-        flags |= DESC_A_MASK; /* Make sure the A bit is set to avoid trouble. */
     sc->flags = flags;
+#ifdef VBOX
     sc->newselector = 0;
     sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
 #endif
@@ -995,6 +1004,23 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
     }
 }
 
+#ifdef VBOX
+/* Raw input, adjust the flags adding the stupid intel flag when applicable. */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+                                          int seg_reg, unsigned int selector,
+                                          target_ulong base,
+                                          unsigned int limit,
+                                          unsigned int flags)
+{
+    flags &= DESC_RAW_FLAG_BITS;
+    if (flags & DESC_P_MASK)
+        flags |= DESC_A_MASK; /* Make sure the A bit is set to avoid trouble. */
+    else if (selector < 4U)
+        flags |= DESC_INTEL_UNUSABLE;
+    cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, base, limit, flags);
+}
+#endif
+
 static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
                                                int sipi_vector)
 {
diff --git a/src/recompiler/target-i386/op_helper.c b/src/recompiler/target-i386/op_helper.c
index 048c6865..20d671cd 100644
--- a/src/recompiler/target-i386/op_helper.c
+++ b/src/recompiler/target-i386/op_helper.c
@@ -229,15 +229,6 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
     int index;
     target_ulong ptr;
 
-#ifdef VBOX
-    /* Trying to load a selector with CPL=1? */
-    if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
-    {
-        Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
-        selector = selector & 0xfffc;
-    }
-#endif /* VBOX */
-
     if (selector & 0x4)
         dt = &env->ldt;
     else
@@ -269,8 +260,10 @@ static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t
 {
     sc->base = get_seg_base(e1, e2);
     sc->limit = get_seg_limit(e1, e2);
+#ifndef VBOX
     sc->flags = e2;
-#ifdef VBOX
+#else
+    sc->flags = e2 & DESC_RAW_FLAG_BITS;
     sc->newselector = 0;
     sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
 #endif
@@ -344,7 +337,7 @@ static void tss_load_seg(int seg_reg, int selector)
     /* Trying to load a selector with CPL=1? */
     if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
     {
-        Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
+        Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
         selector = selector & 0xfffc;
     }
 #endif /* VBOX */
@@ -458,6 +451,7 @@ static void switch_tss(int tss_selector,
     else
         old_tss_limit_max = 43;
 
+#ifndef VBOX /* The old TSS is written first... */
     /* read all the registers from the new TSS */
     if (type & 8) {
         /* 32 bit */
@@ -484,6 +478,7 @@
         new_segs[R_FS] = 0;
         new_segs[R_GS] = 0;
         new_trap = 0;
     }
+#endif
     /* NOTE: we must avoid memory exceptions during the task switch,
        so we make dummy accesses before */
@@ -523,10 +518,6 @@
         stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
         for(i = 0; i < 6; i++)
             stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
-#ifdef VBOX
-        /* Must store the ldt as it gets reloaded and might have been changed. */
-        stw_kernel(env->tr.base + 0x60, env->ldt.selector);
-#endif
 #if defined(VBOX) && defined(DEBUG)
         printf("TSS 32 bits switch\n");
         printf("Saving CS=%08X\n", env->segs[R_CS].selector);
@@ -544,12 +535,37 @@
         stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
         stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
         for(i = 0; i < 4; i++)
-            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
+            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
+    }
+
 #ifdef VBOX
-        /* Must store the ldt as it gets reloaded and might have been changed. */
-        stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
-#endif
+    /* read all the registers from the new TSS - may be the same as the old one */
+    if (type & 8) {
+        /* 32 bit */
+        new_cr3 = ldl_kernel(tss_base + 0x1c);
+        new_eip = ldl_kernel(tss_base + 0x20);
+        new_eflags = ldl_kernel(tss_base + 0x24);
+        for(i = 0; i < 8; i++)
+            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
+        for(i = 0; i < 6; i++)
+            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
+        new_ldt = lduw_kernel(tss_base + 0x60);
+        new_trap = ldl_kernel(tss_base + 0x64);
+    } else {
+        /* 16 bit */
+        new_cr3 = 0;
+        new_eip = lduw_kernel(tss_base + 0x0e);
+        new_eflags = lduw_kernel(tss_base + 0x10);
+        for(i = 0; i < 8; i++)
+            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
+        for(i = 0; i < 4; i++)
+            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
+        new_ldt = lduw_kernel(tss_base + 0x2a);
+        new_segs[R_FS] = 0;
+        new_segs[R_GS] = 0;
+        new_trap = 0;
     }
+#endif
 
     /* now if an exception occurs, it will occurs in the next task
        context */
@@ -576,8 +592,10 @@
     env->tr.selector = tss_selector;
     env->tr.base = tss_base;
     env->tr.limit = tss_limit;
+#ifndef VBOX
     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
-#ifdef VBOX
+#else
+    env->tr.flags = e2 & (DESC_RAW_FLAG_BITS & ~(DESC_TSS_BUSY_MASK)); /** @todo stop clearing the busy bit, VT-x and AMD-V seems to set it in the hidden bits. */
     env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
     env->tr.newselector = 0;
 #endif
@@ -621,6 +639,7 @@
         env->ldt.limit = 0;
         env->ldt.flags = 0;
 #ifdef VBOX
+        env->ldt.flags = DESC_INTEL_UNUSABLE;
         env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
         env->ldt.newselector = 0;
 #endif
@@ -955,6 +974,10 @@
     if (load_segment(&e1, &e2, selector) != 0)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+#ifdef VBOX /** @todo figure out when this is done one day... */
+    if (!(e2 & DESC_A_MASK))
+        e2 = set_segment_accessed(selector, e2);
+#endif
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
@@ -971,6 +994,10 @@
                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
             if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+#ifdef VBOX /** @todo figure out when this is done one day... */
+            if (!(ss_e2 & DESC_A_MASK))
+                ss_e2 = set_segment_accessed(ss, ss_e2);
+#endif
             ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
             if (ss_dpl != dpl)
                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
@@ -1323,7 +1350,11 @@
     if (new_stack) {
         ss = 0 | dpl;
+#ifndef VBOX
         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+#else
+        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
+#endif
     }
     ESP = esp;
@@ -1743,10 +1774,11 @@ static int check_exception(int intno, int *error_code)
 # ifndef VBOX
         qemu_system_reset_request();
+        return EXCP_HLT;
 # else
-        remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve tripple fault handling. */
+        remR3RaiseRC(env->pVM, VINF_EM_TRIPLE_FAULT);
+        return EXCP_RC;
 # endif
-        return EXCP_HLT;
     }
 #endif
@@ -2500,6 +2532,7 @@ void helper_lldt(int selector)
         env->ldt.base = 0;
         env->ldt.limit = 0;
 #ifdef VBOX
+        env->ldt.flags = DESC_INTEL_UNUSABLE;
         env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
         env->ldt.newselector = 0;
 #endif
@@ -2555,19 +2588,19 @@ void helper_ltr(int selector)
     target_ulong ptr;
 
 #ifdef VBOX
-    Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
-         (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
+    Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
+         (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
          env->tr.flags, (RTSEL)(selector & 0xffff)));
 #endif
     selector &= 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* NULL selector case: invalid TR */
+#ifdef VBOX
+        raise_exception_err(EXCP0A_TSS, 0);
+#else
         env->tr.base = 0;
         env->tr.limit = 0;
         env->tr.flags = 0;
-#ifdef VBOX
-        env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
-        env->tr.newselector = 0;
 #endif
     } else {
         if (selector & 0x4)
@@ -2636,12 +2669,13 @@ void helper_load_seg(int seg_reg, int selector)
     /* Trying to load a selector with CPL=1? */
     if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
     {
-        Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
+        Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
         selector = selector & 0xfffc;
     }
 #endif /* VBOX */
 
     if ((selector & 0xfffc) == 0) { /* null selector case */
+#ifndef VBOX
         if (seg_reg == R_SS
 #ifdef TARGET_X86_64
             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
@@ -2649,6 +2683,16 @@
            )
             raise_exception_err(EXCP0D_GPF, 0);
         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
+#else
+        if (seg_reg == R_SS) {
+            if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+                raise_exception_err(EXCP0D_GPF, 0);
+            e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
+        } else {
+            e2 = DESC_INTEL_UNUSABLE;
+        }
+        cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
+#endif
     } else {
         if (selector & 0x4)
@@ -2746,6 +2790,10 @@ void helper_ljmp_protected(int new_cs, target_ulong new_eip,
         if (new_eip > limit && !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+#ifdef VBOX
+        if (!(e2 & DESC_A_MASK))
+            e2 = set_segment_accessed(new_cs, e2);
+#endif
         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                        get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
@@ -2871,6 +2919,10 @@
         }
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+#ifdef VBOX
+        if (!(e2 & DESC_A_MASK))
+            e2 = set_segment_accessed(new_cs, e2);
+#endif
 
 #ifdef TARGET_X86_64
         /* XXX: check 16/32 bit cases in long mode */
@@ -3168,11 +3220,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     new_cs &= 0xffff;
     if (is_iret) {
         POPL(ssp, sp, sp_mask, new_eflags);
+#define LOG_GROUP LOG_GROUP_REM
 #if defined(VBOX) && defined(DEBUG)
-        printf("iret: new CS %04X\n", new_cs);
-        printf("iret: new EIP %08X\n", (uint32_t)new_eip);
-        printf("iret: new EFLAGS %08X\n", new_eflags);
-        printf("iret: EAX=%08x\n", (uint32_t)EAX);
+        Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
+        Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
+        Log(("iret: new EFLAGS %08X\n", new_eflags));
+        Log(("iret: EAX=%08x\n", (uint32_t)EAX));
 #endif
         if (new_eflags & VM_MASK)
             goto return_to_vm86;
@@ -3180,10 +3233,22 @@
 #ifdef VBOX
         if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
         {
-# ifdef DEBUG
-            printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
-# endif
-            new_cs = new_cs & 0xfffc;
+            if (   !EMIsRawRing1Enabled(env->pVM)
+                || env->segs[R_CS].selector == (new_cs & 0xfffc))
+            {
+                Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
+                new_cs = new_cs & 0xfffc;
+            }
+            else
+            {
+                /* Ugly assumption: assume a genuine switch to ring-1. */
+                Log(("Genuine switch to ring-1 (iret)\n"));
+            }
+        }
+        else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
+        {
+            Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
+            new_cs = (new_cs & 0xfffc) | 1;
         }
 #endif
     } else {
@@ -3199,14 +3264,14 @@
     if ((new_cs & 0xfffc) == 0)
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("new_cs & 0xfffc) == 0\n");
+        Log(("new_cs & 0xfffc) == 0\n"));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     }
     if (load_segment(&e1, &e2, new_cs) != 0)
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("load_segment failed\n");
+        Log(("load_segment failed\n"));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     }
@@ -3214,7 +3279,7 @@
         !(e2 & DESC_CS_MASK))
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("e2 mask %08x\n", e2);
+        Log(("e2 mask %08x\n", e2));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     }
@@ -3223,16 +3288,17 @@
     if (rpl < cpl)
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
+        Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+
     if (e2 & DESC_C_MASK) {
         if (dpl > rpl)
         {
 #if defined(VBOX) && defined(DEBUG)
-            printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
+            Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
 #endif
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         }
@@ -3240,7 +3306,7 @@
         if (dpl != rpl)
         {
 #if defined(VBOX) && defined(DEBUG)
-            printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
+            Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
 #endif
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         }
@@ -3248,7 +3314,7 @@
     if (!(e2 & DESC_P_MASK))
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("DESC_P_MASK e2=%08x\n", e2);
+        Log(("DESC_P_MASK e2=%08x\n", e2));
 #endif
         raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
     }
@@ -3289,12 +3355,9 @@
         if ((new_ss & 0xfffc) == 0) {
 #ifdef TARGET_X86_64
             /* NULL ss is allowed in long mode if cpl != 3*/
+# ifndef VBOX
             /* XXX: test CS64 ? */
             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
-# ifdef VBOX
-                if (!(e2 & DESC_A_MASK))
-                    e2 = set_segment_accessed(new_cs, e2);
-# endif
                 cpu_x86_load_seg_cache(env, R_SS, new_ss, 0, 0xffffffff,
                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -3302,24 +3365,62 @@
                                        DESC_W_MASK | DESC_A_MASK);
                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
             } else
+# else /* VBOX */
+            if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
+                if (!(e2 & DESC_A_MASK))
+                    e2 = set_segment_accessed(new_cs, e2);
+                cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
+                                                        0, 0xffffffff,
+                                                        DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
+                ss_e2 = DESC_B_MASK; /* not really used */
+            } else
+# endif
 #endif
             {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("NULL ss, rpl=%d\n", rpl));
+#endif
                 raise_exception_err(EXCP0D_GPF, 0);
             }
         } else {
             if ((new_ss & 3) != rpl)
+            {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
+#endif
                 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            }
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
+            {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("new_ss=%x load error\n", new_ss));
+#endif
                 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
+            {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
+#endif
                 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
+            {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
+#endif
                 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            }
            if (!(ss_e2 & DESC_P_MASK))
+            {
+#if defined(VBOX) && defined(DEBUG)
+                Log(("new_ss=%#x #NP\n", new_ss));
+#endif
                 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
+            }
 #ifdef VBOX
            if (!(e2 & DESC_A_MASK))
                e2 = set_segment_accessed(new_cs, e2);
@@ -3402,6 +3503,7 @@ void helper_iret_protected(int shift, int next_eip)
     uint32_t e1, e2;
 
 #ifdef VBOX
+    Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
     e1 = e2 = 0; /** @todo Why do we do this? */
     remR3TrapClear(env->pVM);
 #endif
@@ -3410,7 +3512,12 @@
     if (env->eflags & NT_MASK) {
 #ifdef TARGET_X86_64
         if (env->hflags & HF_LMA_MASK)
+        {
+#if defined(VBOX) && defined(DEBUG)
+            Log(("eflags.NT=1 on iret in long mode\n"));
+#endif
             raise_exception_err(EXCP0D_GPF, 0);
+        }
 #endif
         tss_selector = lduw_kernel(env->tr.base + 0);
         if (tss_selector & 4)
@@ -3580,14 +3687,31 @@ void helper_movl_drN_T0(int reg, target_ulong t0)
         hw_breakpoint_remove(env, reg);
         env->dr[reg] = t0;
         hw_breakpoint_insert(env, reg);
+# ifndef VBOX
     } else if (reg == 7) {
+# else
+    } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
+        if (t0 & X86_DR7_MBZ_MASK)
+            raise_exception_err(EXCP0D_GPF, 0);
+        t0 |= X86_DR7_RA1_MASK;
+        t0 &= ~X86_DR7_RAZ_MASK;
+# endif
         for (i = 0; i < 4; i++)
             hw_breakpoint_remove(env, i);
         env->dr[7] = t0;
         for (i = 0; i < 4; i++)
             hw_breakpoint_insert(env, i);
-    } else
+    } else {
+# ifndef VBOX
         env->dr[reg] = t0;
+# else
+        if (t0 & X86_DR6_MBZ_MASK)
+            raise_exception_err(EXCP0D_GPF, 0);
+        t0 |= X86_DR6_RA1_MASK;
+        t0 &= ~X86_DR6_RAZ_MASK;
+        env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
+# endif
+    }
 }
 #endif
@@ -4067,7 +4191,11 @@ target_ulong helper_lar(target_ulong selector1)
         }
     }
     CC_SRC = eflags | CC_Z;
+#ifdef VBOX /* AMD says 0x00ffff00, while intel says 0x00fxff00. Bochs and IEM does like AMD says (x=f). */
+    return e2 & 0x00ffff00;
+#else
     return e2 & 0x00f0ff00;
+#endif
 }
 
 void helper_verr(target_ulong selector1)
@@ -6632,7 +6760,7 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
         break;
     }
 #else  /* VBOX */
-    AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
+    AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
 #endif /* VBOX */
 }
diff --git a/src/recompiler/target-i386/translate.c b/src/recompiler/target-i386/translate.c
index 5adf907c..46bf8208 100644
--- a/src/recompiler/target-i386/translate.c
+++ b/src/recompiler/target-i386/translate.c
@@ -106,7 +106,9 @@ uint8_t ldub_code_raw(target_ulong pc)
 {
     uint8_t b;
 
+# ifdef VBOX_WITH_RAW_MODE
     if (!remR3GetOpcode(cpu_single_env, pc, &b))
+# endif
         b = ldub_code(pc);
     return b;
 }
```
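For quick reference, below is a minimal, self-contained sketch (not part of the patch) of the flag sanitisation that the new `cpu_x86_load_seg_cache()` wrapper in `cpu.h` performs before handing the flags to `cpu_x86_load_seg_cache_with_clean_flags()`. The `DESC_P_MASK`/`DESC_A_MASK` values follow QEMU's `target-i386/cpu.h`, while `RT_BIT_32()`/`UINT32_C()` are expanded to plain C so the example compiles on its own; the helper and `main()` names are illustrative only.

```c
/*
 * Standalone sketch of the segment-flag sanitisation added by this commit:
 * mask the raw descriptor flags to DESC_RAW_FLAG_BITS, force the accessed
 * bit for present segments, and mark NULL selectors with the internal
 * VT-x "unusable" bit.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_P_MASK          (1u << 15)        /* segment present */
#define DESC_A_MASK          (1u << 8)         /* accessed bit */
#define DESC_INTEL_UNUSABLE  (1u << (16 + 8))  /* internal VT-x bit for NULL selectors */
#define DESC_RAW_FLAG_BITS   0x00ffffffu       /* flag bits taken from the raw descriptor */

/* Mirrors the logic of the VBOX cpu_x86_load_seg_cache() wrapper. */
static uint32_t sanitize_seg_flags(unsigned selector, uint32_t flags)
{
    flags &= DESC_RAW_FLAG_BITS;        /* drop anything above bit 23 */
    if (flags & DESC_P_MASK)
        flags |= DESC_A_MASK;           /* present segment: force the accessed bit */
    else if (selector < 4u)
        flags |= DESC_INTEL_UNUSABLE;   /* NULL selector: mark unusable for VT-x */
    return flags;
}

int main(void)
{
    /* Present code-like segment: the A bit gets forced on. */
    printf("%08x\n", sanitize_seg_flags(0x0008, DESC_P_MASK | 0x00000a00u));
    /* NULL selector, not present: flagged as unusable. */
    printf("%08x\n", sanitize_seg_flags(0x0000, 0));
    return 0;
}
```

Compiling and running this sketch should print `00008b00` and `01000000`, matching the two branches the patch introduces (accessed bit forced for present descriptors, `DESC_INTEL_UNUSABLE` set for NULL selectors).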