Diffstat (limited to 'src/VBox/VMM/VMMRC')
26 files changed, 730 insertions, 719 deletions
diff --git a/src/VBox/VMM/VMMRC/CPUMRC.cpp b/src/VBox/VMM/VMMRC/CPUMRC.cpp index da647062..5c797837 100644 --- a/src/VBox/VMM/VMMRC/CPUMRC.cpp +++ b/src/VBox/VMM/VMMRC/CPUMRC.cpp @@ -24,6 +24,7 @@ #include <VBox/vmm/vmm.h> #include <VBox/vmm/patm.h> #include <VBox/vmm/trpm.h> +#include <VBox/vmm/em.h> #include "CPUMInternal.h" #include <VBox/vmm/vm.h> #include <VBox/err.h> @@ -105,3 +106,116 @@ DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM) AssertMsg(CPUMIsGuestInRawMode(pVCpu), ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); } + + +/** + * Get the current privilege level of the guest. + * + * @returns CPL + * @param pVCpu The current virtual CPU. + * @param pRegFrame Pointer to the register frame. + * + * @todo r=bird: This is very similar to CPUMGetGuestCPL and I cannot quite + * see why this variant of the code is necessary. + */ +VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +{ + /* + * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not. + * + * Note! We used to check CS.DPL here, assuming it was always equal to + * CPL even if a conforming segment was loaded. But this truned out to + * only apply to older AMD-V. With VT-x we had an ACP2 regression + * during install after a far call to ring 2 with VT-x. Then on newer + * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl + * as well as ss.Attr.n.u2Dpl to make this (and other) code work right. + * + * So, forget CS.DPL, always use SS.DPL. + * + * Note! The SS RPL is always equal to the CPL, while the CS RPL + * isn't necessarily equal if the segment is conforming. + * See section 4.11.1 in the AMD manual. + */ + uint32_t uCpl; + if (!pRegFrame->eflags.Bits.u1VM) + { + uCpl = (pRegFrame->ss.Sel & X86_SEL_RPL); +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 +# ifdef VBOX_WITH_RAW_RING1 + if (pVCpu->cpum.s.fRawEntered) + { + if ( uCpl == 2 + && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)) ) + uCpl = 1; + else if (uCpl == 1) + uCpl = 0; + } + Assert(uCpl != 2); /* ring 2 support not allowed anymore. */ +# else + if (uCpl == 1) + uCpl = 0; +# endif +#endif + } + else + uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */ + + return uCpl; +} + + +#ifdef VBOX_WITH_RAW_RING1 +/** + * Transforms the guest CPU state to raw-ring mode. + * + * This function will change the any of the cs and ss register with DPL=0 to DPL=1. + * + * Used by emInterpretIret() after the new state has been loaded. + * + * @param pVCpu Pointer to the VMCPU. + * @param pCtxCore The context core (for trap usage). + * @see @ref pg_raw + * @remarks Will be probably obsoleted by #5653 (it will leave and reenter raw + * mode instead, I think). + */ +VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore) +{ + /* + * Are we in Ring-0? + */ + if ( pCtxCore->ss.Sel + && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0 + && !pCtxCore->eflags.Bits.u1VM) + { + /* + * Set CPL to Ring-1. + */ + pCtxCore->ss.Sel |= 1; + if ( pCtxCore->cs.Sel + && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0) + pCtxCore->cs.Sel |= 1; + } + else + { + if ( EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)) + && !pCtxCore->eflags.Bits.u1VM + && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1) + { + /* Set CPL to Ring-2. 
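     (Guest ring-1 code executes at ring 2 when raw ring-1 mode is active,
     matching the uCpl == 2 mapping in CPUMRCGetGuestCPL above; hence the
     RPL bump from 1 to 2 below.)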
*/ + pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2; + if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1) + pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2; + } + } + + /* + * Assert sanity. + */ + AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n")); + AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0, + ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL)); + + pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */ +} +#endif /* VBOX_WITH_RAW_RING1 */ + diff --git a/src/VBox/VMM/VMMRC/CPUMRCA.asm b/src/VBox/VMM/VMMRC/CPUMRCA.asm index 43326128..13520783 100644 --- a/src/VBox/VMM/VMMRC/CPUMRCA.asm +++ b/src/VBox/VMM/VMMRC/CPUMRCA.asm @@ -43,6 +43,205 @@ extern NAME(CPUMRCAssertPreExecutionSanity) BEGINCODE +;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu(). +; Cleans the FPU state, if necessary, before restoring the FPU. +; +; This macro ASSUMES CR0.TS is not set! +; @remarks Trashes xAX!! +; Changes here should also be reflected in CPUMR0A.asm's copy! +%macro CLEANFPU 0 + test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY + jz .nothing_to_clean + + xor eax, eax + fnstsw ax ; Get FSW + test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions + ; while clearing & loading the FPU bits in 'clean_fpu' + jz clean_fpu + fnclex + +.clean_fpu: + ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs + ; for the upcoming push (load) + fild dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU. + +.nothing_to_clean: +%endmacro + + +;; +; Handles lazy FPU saving and restoring. +; +; This handler will implement lazy fpu (sse/mmx/stuff) saving. +; Two actions may be taken in this handler since the Guest OS may +; be doing lazy fpu switching. So, we'll have to generate those +; traps which the Guest CPU CTX shall have according to the +; its CR0 flags. If no traps for the Guest OS, we'll save the host +; context and restore the guest context. +; +; @returns 0 if caller should continue execution. +; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated. +; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer +; +align 16 +BEGINPROC cpumHandleLazyFPUAsm + ; + ; Figure out what to do. + ; + ; There are two basic actions: + ; 1. Save host fpu and restore guest fpu. + ; 2. Generate guest trap. + ; + ; When entering the hypervisor we'll always enable MP (for proper wait + ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag + ; is taken from the guest OS in order to get proper SSE handling. + ; + ; + ; Actions taken depending on the guest CR0 flags: + ; + ; 3 2 1 + ; TS | EM | MP | FPUInstr | WAIT :: VMM Action + ; ------------------------------------------------------------------------ + ; 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC. + ; 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC. + ; 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC; + ; 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC. + ; 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.) + ; 1 | 0 | 1 | #NM | #NM :: Go to host taking trap there. + ; 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.) + ; 1 | 1 | 1 | #NM | #NM :: Go to host taking trap there. + + ; + ; Before taking any of these actions we're checking if we have already + ; loaded the GC FPU. 
Because if we have, this is an trap for the guest - raw ring-3. + ; +%ifdef RT_ARCH_AMD64 + %ifdef RT_OS_WINDOWS + mov xDX, rcx + %else + mov xDX, rdi + %endif +%else + mov xDX, dword [esp + 4] +%endif + test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU + jz hlfpua_not_loaded + jmp hlfpua_to_host + + ; + ; Take action. + ; +align 16 +hlfpua_not_loaded: + mov eax, [xDX + CPUMCPU.Guest.cr0] + and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS +%ifdef RT_ARCH_AMD64 + lea r8, [hlfpuajmp1 wrt rip] + jmp qword [rax*4 + r8] +%else + jmp dword [eax*2 + hlfpuajmp1] +%endif +align 16 +;; jump table using fpu related cr0 flags as index. +hlfpuajmp1: + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_to_host + RTCCPTR_DEF hlfpua_switch_fpu_ctx + RTCCPTR_DEF hlfpua_to_host +;; and mask for cr0. +hlfpu_afFlags: + RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) + RTCCPTR_DEF ~(X86_CR0_TS) + RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) + RTCCPTR_DEF ~(X86_CR0_TS) + RTCCPTR_DEF ~(X86_CR0_MP) + RTCCPTR_DEF 0 + RTCCPTR_DEF ~(X86_CR0_MP) + RTCCPTR_DEF 0 + + ; + ; Action - switch FPU context and change cr0 flags. + ; +align 16 +hlfpua_switch_fpu_ctx: + ; Paranoia. This function was previously used in ring-0, not any longer. +%ifdef IN_RING3 +%error "This function is not written for ring-3" +%endif +%ifdef IN_RING0 +%error "This function is not written for ring-0" +%endif + + mov xCX, cr0 +%ifdef RT_ARCH_AMD64 + lea r8, [hlfpu_afFlags wrt rip] + and rcx, [rax*4 + r8] ; calc the new cr0 flags. +%else + and ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags. +%endif + mov xAX, cr0 + and xAX, ~(X86_CR0_TS | X86_CR0_EM) + mov cr0, xAX ; clear flags so we don't trap here. +%ifndef RT_ARCH_AMD64 + mov eax, edx ; Calculate the PCPUM pointer + sub eax, [edx + CPUMCPU.offCPUM] + test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR + jz short hlfpua_no_fxsave +%endif + +%ifdef RT_ARCH_AMD64 + ; Use explicit REX prefix. See @bugref{6398}. + o64 fxsave [xDX + CPUMCPU.Host.fpu] +%else + fxsave [xDX + CPUMCPU.Host.fpu] +%endif + or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) +%ifdef RT_ARCH_AMD64 + o64 fxrstor [xDX + CPUMCPU.Guest.fpu] +%else + fxrstor [xDX + CPUMCPU.Guest.fpu] +%endif +hlfpua_finished_switch: + + ; Load new CR0 value. + ;; @todo Optimize the many unconditional CR0 writes. + mov cr0, xCX ; load the new cr0 flags. + + ; return continue execution. + xor eax, eax + ret + +%ifndef RT_ARCH_AMD64 +; legacy support. +hlfpua_no_fxsave: + fnsave [xDX + CPUMCPU.Host.fpu] + or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm + mov eax, [xDX + CPUMCPU.Guest.fpu] ; control word + not eax ; 1 means exception ignored (6 LS bits) + and eax, byte 03Fh ; 6 LS bits only + test eax, [xDX + CPUMCPU.Guest.fpu + 4] ; status word + jz short hlfpua_no_exceptions_pending + ; technically incorrect, but we certainly don't want any exceptions now!! + and dword [xDX + CPUMCPU.Guest.fpu + 4], ~03Fh +hlfpua_no_exceptions_pending: + frstor [xDX + CPUMCPU.Guest.fpu] + jmp near hlfpua_finished_switch +%endif ; !RT_ARCH_AMD64 + + + ; + ; Action - Generate Guest trap. 
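 ; Reached through the jump table above when TS=1 and MP=1 (both FPU
 ; instructions and WAIT would trap in the guest), or directly when the
 ; guest FPU state is already loaded; either way the #NM is reflected
 ; back to the guest instead of being handled by a context switch.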
+ ; +hlfpua_action_4: +hlfpua_to_host: + mov eax, VINF_EM_RAW_GUEST_TRAP + ret +ENDPROC cpumHandleLazyFPUAsm + ;; ; Calls a guest trap/interrupt handler directly diff --git a/src/VBox/VMM/VMMRC/CSAMRC.cpp b/src/VBox/VMM/VMMRC/CSAMRC.cpp index 1d697b0b..f1745f8e 100644 --- a/src/VBox/VMM/VMMRC/CSAMRC.cpp +++ b/src/VBox/VMM/VMMRC/CSAMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -76,7 +76,7 @@ VMMRCDECL(int) CSAMGCCodePageWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX REMFlushTBs(pVM); #endif - pPATMGCState = PATMQueryGCState(pVM); + pPATMGCState = PATMGetGCState(pVM); Assert(pPATMGCState); Assert(pPATMGCState->fPIF || fPatchCode); @@ -108,7 +108,7 @@ VMMRCDECL(int) CSAMGCCodePageWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX /* If user code is modifying one of our monitored pages, then we can safely make it r/w as it's no longer being used for supervisor code. */ if (cpl != 3) { - rc = PATMGCHandleWriteToPatchPage(pVM, pRegFrame, (RTRCPTR)((RTRCUINTPTR)pvRange + offRange), 4 /** @todo */); + rc = PATMRCHandleWriteToPatchPage(pVM, pRegFrame, (RTRCPTR)((RTRCUINTPTR)pvRange + offRange), 4 /** @todo */); if (rc == VINF_SUCCESS) return rc; if (rc == VINF_EM_RAW_EMULATE_INSTR) diff --git a/src/VBox/VMM/VMMRC/EMRCA.asm b/src/VBox/VMM/VMMRC/EMRCA.asm index f295c55a..8f22bf3d 100644 --- a/src/VBox/VMM/VMMRC/EMRCA.asm +++ b/src/VBox/VMM/VMMRC/EMRCA.asm @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2011 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/HWACCMRCA.asm b/src/VBox/VMM/VMMRC/HWACCMRCA.asm deleted file mode 100644 index c0f5c55e..00000000 --- a/src/VBox/VMM/VMMRC/HWACCMRCA.asm +++ /dev/null @@ -1,586 +0,0 @@ -; $Id: HWACCMRCA.asm $ -;; @file -; VMXM - GC vmx helpers -; - -; -; Copyright (C) 2006-2012 Oracle Corporation -; -; This file is part of VirtualBox Open Source Edition (OSE), as -; available from http://www.virtualbox.org. This file is free software; -; you can redistribute it and/or modify it under the terms of the GNU -; General Public License (GPL) as published by the Free Software -; Foundation, in version 2 as it comes in the "COPYING" file of the -; VirtualBox OSE distribution. VirtualBox OSE is distributed in the -; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. -; - -;******************************************************************************* -;* Header Files * -;******************************************************************************* -%undef RT_ARCH_X86 -%define RT_ARCH_AMD64 -%include "VBox/asmdefs.mac" -%include "VBox/err.mac" -%include "VBox/vmm/hwacc_vmx.mac" -%include "VBox/vmm/cpum.mac" -%include "iprt/x86.mac" -%include "HWACCMInternal.mac" - -%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely. - %macro vmwrite 2, - int3 - %endmacro - %define vmlaunch int3 - %define vmresume int3 - %define vmsave int3 - %define vmload int3 - %define vmrun int3 - %define clgi int3 - %define stgi int3 - %macro invlpga 2, - int3 - %endmacro -%endif - -;; @def MYPUSHSEGS -; Macro saving all segment registers on the stack. 
-; @param 1 full width register name - -;; @def MYPOPSEGS -; Macro restoring all segment registers on the stack -; @param 1 full width register name - - ; Load the corresponding guest MSR (trashes rdx & rcx) - %macro LOADGUESTMSR 2 - mov rcx, %1 - mov edx, dword [rsi + %2 + 4] - mov eax, dword [rsi + %2] - wrmsr - %endmacro - - ; Save a guest MSR (trashes rdx & rcx) - ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs) - %macro SAVEGUESTMSR 2 - mov rcx, %1 - rdmsr - mov dword [rsi + %2], eax - mov dword [rsi + %2 + 4], edx - %endmacro - - %macro MYPUSHSEGS 1 - mov %1, es - push %1 - mov %1, ds - push %1 - %endmacro - - %macro MYPOPSEGS 1 - pop %1 - mov ds, %1 - pop %1 - mov es, %1 - %endmacro - -BEGINCODE -BITS 64 - - -;/** -; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode) -; * -; * @returns VBox status code -; * @param HCPhysCpuPage VMXON physical address [rsp+8] -; * @param HCPhysVMCS VMCS physical address [rsp+16] -; * @param pCache VMCS cache [rsp+24] -; * @param pCtx Guest context (rsi) -; */ -BEGINPROC VMXGCStartVM64 - push rbp - mov rbp, rsp - - ; Make sure VT-x instructions are allowed - mov rax, cr4 - or rax, X86_CR4_VMXE - mov cr4, rax - - ;/* Enter VMX Root Mode */ - vmxon [rbp + 8 + 8] - jnc .vmxon_success - mov rax, VERR_VMX_INVALID_VMXON_PTR - jmp .vmstart64_vmxon_failed - -.vmxon_success: - jnz .vmxon_success2 - mov rax, VERR_VMX_GENERIC - jmp .vmstart64_vmxon_failed - -.vmxon_success2: - ; Activate the VMCS pointer - vmptrld [rbp + 16 + 8] - jnc .vmptrld_success - mov rax, VERR_VMX_INVALID_VMCS_PTR - jmp .vmstart64_vmxoff_end - -.vmptrld_success: - jnz .vmptrld_success2 - mov rax, VERR_VMX_GENERIC - jmp .vmstart64_vmxoff_end - -.vmptrld_success2: - - ; Save the VMCS pointer on the stack - push qword [rbp + 16 + 8]; - - ;/* Save segment registers */ - MYPUSHSEGS rax - -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - ; Flush the VMCS write cache first (before any other vmreads/vmwrites!) 
- mov rbx, [rbp + 24 + 8] ; pCache - -%ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov qword [rbx + VMCSCACHE.uPos], 2 -%endif - -%ifdef DEBUG - mov rax, [rbp + 8 + 8] ; HCPhysCpuPage - mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax - mov rax, [rbp + 16 + 8] ; HCPhysVMCS - mov [rbx + VMCSCACHE.TestIn.HCPhysVMCS], rax - mov [rbx + VMCSCACHE.TestIn.pCache], rbx - mov [rbx + VMCSCACHE.TestIn.pCtx], rsi -%endif - - mov ecx, [rbx + VMCSCACHE.Write.cValidEntries] - cmp ecx, 0 - je .no_cached_writes - mov rdx, rcx - mov rcx, 0 - jmp .cached_write - -ALIGN(16) -.cached_write: - mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4] - vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8] - inc rcx - cmp rcx, rdx - jl .cached_write - - mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0 -.no_cached_writes: - - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov qword [rbx + VMCSCACHE.uPos], 3 - %endif - ; Save the pCache pointer - push xBX -%endif - - ; Save the host state that's relevant in the temporary 64 bits mode - mov rdx, cr0 - mov eax, VMX_VMCS_HOST_CR0 - vmwrite rax, rdx - - mov rdx, cr3 - mov eax, VMX_VMCS_HOST_CR3 - vmwrite rax, rdx - - mov rdx, cr4 - mov eax, VMX_VMCS_HOST_CR4 - vmwrite rax, rdx - - mov rdx, cs - mov eax, VMX_VMCS_HOST_FIELD_CS - vmwrite rax, rdx - - mov rdx, ss - mov eax, VMX_VMCS_HOST_FIELD_SS - vmwrite rax, rdx - - sub rsp, 8*2 - sgdt [rsp] - mov eax, VMX_VMCS_HOST_GDTR_BASE - vmwrite rax, [rsp+2] - add rsp, 8*2 - -%ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov qword [rbx + VMCSCACHE.uPos], 4 -%endif - - ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode) - - ;/* First we have to save some final CPU context registers. */ - lea rdx, [.vmlaunch64_done wrt rip] - mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */ - vmwrite rax, rdx - ;/* Note: assumes success... */ - - ;/* Manual save and restore: - ; * - General purpose registers except RIP, RSP - ; * - ; * Trashed: - ; * - CR2 (we don't care) - ; * - LDTR (reset to 0) - ; * - DRx (presumably not changed at all) - ; * - DR7 (reset to 0x400) - ; * - EFLAGS (reset to RT_BIT(1); not relevant) - ; * - ; */ - - ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs - ;; @todo use the automatic load feature for MSRs - LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR - LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR - LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK - LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE - -%ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov qword [rbx + VMCSCACHE.uPos], 5 -%endif - - ; Save the pCtx pointer - push rsi - - ; Restore CR2 - mov rbx, qword [rsi + CPUMCTX.cr2] - mov cr2, rbx - - mov eax, VMX_VMCS_HOST_RSP - vmwrite rax, rsp - ;/* Note: assumes success... */ - ;/* Don't mess with ESP anymore!! */ - - ;/* Restore Guest's general purpose registers. */ - mov rax, qword [rsi + CPUMCTX.eax] - mov rbx, qword [rsi + CPUMCTX.ebx] - mov rcx, qword [rsi + CPUMCTX.ecx] - mov rdx, qword [rsi + CPUMCTX.edx] - mov rbp, qword [rsi + CPUMCTX.ebp] - mov r8, qword [rsi + CPUMCTX.r8] - mov r9, qword [rsi + CPUMCTX.r9] - mov r10, qword [rsi + CPUMCTX.r10] - mov r11, qword [rsi + CPUMCTX.r11] - mov r12, qword [rsi + CPUMCTX.r12] - mov r13, qword [rsi + CPUMCTX.r13] - mov r14, qword [rsi + CPUMCTX.r14] - mov r15, qword [rsi + CPUMCTX.r15] - - ;/* Restore rdi & rsi. */ - mov rdi, qword [rsi + CPUMCTX.edi] - mov rsi, qword [rsi + CPUMCTX.esi] - - vmlaunch - jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. 
*/ - -ALIGNCODE(16) -.vmlaunch64_done: - jc near .vmstart64_invalid_vmxon_ptr - jz near .vmstart64_start_failed - - push rdi - mov rdi, [rsp + 8] ; pCtx - - mov qword [rdi + CPUMCTX.eax], rax - mov qword [rdi + CPUMCTX.ebx], rbx - mov qword [rdi + CPUMCTX.ecx], rcx - mov qword [rdi + CPUMCTX.edx], rdx - mov qword [rdi + CPUMCTX.esi], rsi - mov qword [rdi + CPUMCTX.ebp], rbp - mov qword [rdi + CPUMCTX.r8], r8 - mov qword [rdi + CPUMCTX.r9], r9 - mov qword [rdi + CPUMCTX.r10], r10 - mov qword [rdi + CPUMCTX.r11], r11 - mov qword [rdi + CPUMCTX.r12], r12 - mov qword [rdi + CPUMCTX.r13], r13 - mov qword [rdi + CPUMCTX.r14], r14 - mov qword [rdi + CPUMCTX.r15], r15 - - pop rax ; the guest edi we pushed above - mov qword [rdi + CPUMCTX.edi], rax - - pop rsi ; pCtx (needed in rsi by the macros below) - - ;; @todo use the automatic load feature for MSRs - SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR - SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR - SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK - SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE - -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - pop rdi ; saved pCache - - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 7 - %endif - %ifdef DEBUG - mov [rdi + VMCSCACHE.TestOut.pCache], rdi - mov [rdi + VMCSCACHE.TestOut.pCtx], rsi - mov rax, cr8 - mov [rdi + VMCSCACHE.TestOut.cr8], rax - %endif - - mov ecx, [rdi + VMCSCACHE.Read.cValidEntries] - cmp ecx, 0 ; can't happen - je .no_cached_reads - jmp .cached_read - -ALIGN(16) -.cached_read: - dec rcx - mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4] - vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax - cmp rcx, 0 - jnz .cached_read -.no_cached_reads: - - ; Save CR2 for EPT - mov rax, cr2 - mov [rdi + VMCSCACHE.cr2], rax - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 8 - %endif -%endif - - ; Restore segment registers - MYPOPSEGS rax - - mov eax, VINF_SUCCESS - -%ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 9 -%endif -.vmstart64_end: - -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - %ifdef DEBUG - mov rdx, [rsp] ; HCPhysVMCS - mov [rdi + VMCSCACHE.TestOut.HCPhysVMCS], rdx - %endif -%endif - - ; Write back the data and disable the VMCS - vmclear qword [rsp] ;Pushed pVMCS - add rsp, 8 - -.vmstart64_vmxoff_end: - ; Disable VMX root mode - vmxoff -.vmstart64_vmxon_failed: -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - %ifdef DEBUG - cmp eax, VINF_SUCCESS - jne .skip_flags_save - - pushf - pop rdx - mov [rdi + VMCSCACHE.TestOut.eflags], rdx - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 12 - %endif -.skip_flags_save: - %endif -%endif - pop rbp - ret - - -.vmstart64_invalid_vmxon_ptr: - pop rsi ; pCtx (needed in rsi by the macros below) - -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - pop rdi ; pCache - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 10 - %endif - - %ifdef DEBUG - mov [rdi + VMCSCACHE.TestOut.pCache], rdi - mov [rdi + VMCSCACHE.TestOut.pCtx], rsi - %endif -%endif - - ; Restore segment registers - MYPOPSEGS rax - - ; Restore all general purpose host registers. 
- mov eax, VERR_VMX_INVALID_VMXON_PTR - jmp .vmstart64_end - -.vmstart64_start_failed: - pop rsi ; pCtx (needed in rsi by the macros below) - -%ifdef VMX_USE_CACHED_VMCS_ACCESSES - pop rdi ; pCache - - %ifdef DEBUG - mov [rdi + VMCSCACHE.TestOut.pCache], rdi - mov [rdi + VMCSCACHE.TestOut.pCtx], rsi - %endif - %ifdef VBOX_WITH_CRASHDUMP_MAGIC - mov dword [rdi + VMCSCACHE.uPos], 11 - %endif -%endif - - ; Restore segment registers - MYPOPSEGS rax - - ; Restore all general purpose host registers. - mov eax, VERR_VMX_UNABLE_TO_START_VM - jmp .vmstart64_end -ENDPROC VMXGCStartVM64 - - -;/** -; * Prepares for and executes VMRUN (64 bits guests) -; * -; * @returns VBox status code -; * @param HCPhysVMCB Physical address of host VMCB (rsp+8) -; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16) -; * @param pCtx Guest context (rsi) -; */ -BEGINPROC SVMGCVMRun64 - push rbp - mov rbp, rsp - pushf - - ;/* Manual save and restore: - ; * - General purpose registers except RIP, RSP, RAX - ; * - ; * Trashed: - ; * - CR2 (we don't care) - ; * - LDTR (reset to 0) - ; * - DRx (presumably not changed at all) - ; * - DR7 (reset to 0x400) - ; */ - - ;/* Save the Guest CPU context pointer. */ - push rsi ; push for saving the state at the end - - ; save host fs, gs, sysenter msr etc - mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address) - push rax ; save for the vmload after vmrun - vmsave - - ; setup eax for VMLOAD - mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address) - - ;/* Restore Guest's general purpose registers. */ - ;/* RAX is loaded from the VMCB by VMRUN */ - mov rbx, qword [rsi + CPUMCTX.ebx] - mov rcx, qword [rsi + CPUMCTX.ecx] - mov rdx, qword [rsi + CPUMCTX.edx] - mov rdi, qword [rsi + CPUMCTX.edi] - mov rbp, qword [rsi + CPUMCTX.ebp] - mov r8, qword [rsi + CPUMCTX.r8] - mov r9, qword [rsi + CPUMCTX.r9] - mov r10, qword [rsi + CPUMCTX.r10] - mov r11, qword [rsi + CPUMCTX.r11] - mov r12, qword [rsi + CPUMCTX.r12] - mov r13, qword [rsi + CPUMCTX.r13] - mov r14, qword [rsi + CPUMCTX.r14] - mov r15, qword [rsi + CPUMCTX.r15] - mov rsi, qword [rsi + CPUMCTX.esi] - - ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch - clgi - sti - - ; load guest fs, gs, sysenter msr etc - vmload - ; run the VM - vmrun - - ;/* RAX is in the VMCB already; we can use it here. */ - - ; save guest fs, gs, sysenter msr etc - vmsave - - ; load host fs, gs, sysenter msr etc - pop rax ; pushed above - vmload - - ; Set the global interrupt flag again, but execute cli to make sure IF=0. 
- cli - stgi - - pop rax ; pCtx - - mov qword [rax + CPUMCTX.ebx], rbx - mov qword [rax + CPUMCTX.ecx], rcx - mov qword [rax + CPUMCTX.edx], rdx - mov qword [rax + CPUMCTX.esi], rsi - mov qword [rax + CPUMCTX.edi], rdi - mov qword [rax + CPUMCTX.ebp], rbp - mov qword [rax + CPUMCTX.r8], r8 - mov qword [rax + CPUMCTX.r9], r9 - mov qword [rax + CPUMCTX.r10], r10 - mov qword [rax + CPUMCTX.r11], r11 - mov qword [rax + CPUMCTX.r12], r12 - mov qword [rax + CPUMCTX.r13], r13 - mov qword [rax + CPUMCTX.r14], r14 - mov qword [rax + CPUMCTX.r15], r15 - - mov eax, VINF_SUCCESS - - popf - pop rbp - ret -ENDPROC SVMGCVMRun64 - -;/** -; * Saves the guest FPU context -; * -; * @returns VBox status code -; * @param pCtx Guest context [rsi] -; */ -BEGINPROC HWACCMSaveGuestFPU64 - mov rax, cr0 - mov rcx, rax ; save old CR0 - and rax, ~(X86_CR0_TS | X86_CR0_EM) - mov cr0, rax - - fxsave [rsi + CPUMCTX.fpu] - - mov cr0, rcx ; and restore old CR0 again - - mov eax, VINF_SUCCESS - ret -ENDPROC HWACCMSaveGuestFPU64 - -;/** -; * Saves the guest debug context (DR0-3, DR6) -; * -; * @returns VBox status code -; * @param pCtx Guest context [rsi] -; */ -BEGINPROC HWACCMSaveGuestDebug64 - mov rax, dr0 - mov qword [rsi + CPUMCTX.dr + 0*8], rax - mov rax, dr1 - mov qword [rsi + CPUMCTX.dr + 1*8], rax - mov rax, dr2 - mov qword [rsi + CPUMCTX.dr + 2*8], rax - mov rax, dr3 - mov qword [rsi + CPUMCTX.dr + 3*8], rax - mov rax, dr6 - mov qword [rsi + CPUMCTX.dr + 6*8], rax - mov eax, VINF_SUCCESS - ret -ENDPROC HWACCMSaveGuestDebug64 - -;/** -; * Dummy callback handler -; * -; * @returns VBox status code -; * @param param1 Parameter 1 [rsp+8] -; * @param param2 Parameter 2 [rsp+12] -; * @param param3 Parameter 3 [rsp+16] -; * @param param4 Parameter 4 [rsp+20] -; * @param param5 Parameter 5 [rsp+24] -; * @param pCtx Guest context [rsi] -; */ -BEGINPROC HWACCMTestSwitcher64 - mov eax, [rsp+8] - ret -ENDPROC HWACCMTestSwitcher64 diff --git a/src/VBox/VMM/VMMRC/IOMRC.cpp b/src/VBox/VMM/VMMRC/IOMRC.cpp index 397ca962..f0d8685b 100644 --- a/src/VBox/VMM/VMMRC/IOMRC.cpp +++ b/src/VBox/VMM/VMMRC/IOMRC.cpp @@ -61,26 +61,27 @@ * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr) * * @param pVM The virtual machine handle. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. */ -VMMRCDECL(VBOXSTRICTRC) IOMRCIOPortHandler(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) +VMMRCDECL(VBOXSTRICTRC) IOMRCIOPortHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { switch (pCpu->pCurInstr->uOpcode) { case OP_IN: - return IOMInterpretIN(pVM, pRegFrame, pCpu); + return IOMInterpretIN(pVM, pVCpu, pRegFrame, pCpu); case OP_OUT: - return IOMInterpretOUT(pVM, pRegFrame, pCpu); + return IOMInterpretOUT(pVM, pVCpu, pRegFrame, pCpu); case OP_INSB: case OP_INSWD: - return IOMInterpretINS(pVM, pRegFrame, pCpu); + return IOMInterpretINS(pVM, pVCpu, pRegFrame, pCpu); case OP_OUTSB: case OP_OUTSWD: - return IOMInterpretOUTS(pVM, pRegFrame, pCpu); + return IOMInterpretOUTS(pVM, pVCpu, pRegFrame, pCpu); /* * The opcode wasn't know to us, freak out. 
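The pVCpu parameter is now threaded through all four port interpreters. The trap-0D hunk further down shows how the reworked entry point is driven; condensed to its essentials (a sketch of the calling pattern, not the verbatim handler code):

    VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pVCpu, pRegFrame, &Cpu);
    if (IOM_SUCCESS(rcStrict))
        pRegFrame->rip += cbOp;   /* advance past the IN/OUT/INS/OUTS instruction */
    rc = VBOXSTRICTRC_TODO(rcStrict);
    return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);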
diff --git a/src/VBox/VMM/VMMRC/MMRamRC.cpp b/src/VBox/VMM/VMMRC/MMRamRC.cpp index d1283ef3..45e1270e 100644 --- a/src/VBox/VMM/VMMRC/MMRamRC.cpp +++ b/src/VBox/VMM/VMMRC/MMRamRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/MMRamRCA.asm b/src/VBox/VMM/VMMRC/MMRamRCA.asm index ad89ca2e..f46ecd8c 100644 --- a/src/VBox/VMM/VMMRC/MMRamRCA.asm +++ b/src/VBox/VMM/VMMRC/MMRamRCA.asm @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2011 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/PATMRC.cpp b/src/VBox/VMM/VMMRC/PATMRC.cpp index 30df71fc..a7474b4c 100644 --- a/src/VBox/VMM/VMMRC/PATMRC.cpp +++ b/src/VBox/VMM/VMMRC/PATMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -75,7 +75,7 @@ VMMRCDECL(int) PATMGCMonitorPage(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pReg * @param cbWrite Nr of bytes to write * */ -VMMRCDECL(int) PATMGCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTRCPTR GCPtr, uint32_t cbWrite) +VMMRC_INT_DECL(int) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTRCPTR GCPtr, uint32_t cbWrite) { RTGCUINTPTR pWritePageStart, pWritePageEnd; PPATMPATCHPAGE pPatchPage; @@ -147,14 +147,15 @@ VMMRCDECL(int) PATMGCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTR * @param pVM Pointer to the VM. * @param pCtxCore The relevant core context. */ -VMMDECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame) +VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame) { PPATMPATCHREC pRec; PVMCPU pVCpu = VMMGetCpu0(pVM); int rc; /* Very important check -> otherwise we have a security leak. */ - AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED); + AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U), + VERR_ACCESS_DENIED); Assert(PATMIsPatchGCAddr(pVM, pRegFrame->eip)); /* OP_ILLUD2 in PATM generated code? */ @@ -185,13 +186,13 @@ VMMDECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame) Log(("PATMRC: lookup %x jump table=%x\n", pRegFrame->edx, pRegFrame->edi)); - pRec = PATMQueryFunctionPatch(pVM, (RTRCPTR)(pRegFrame->edx)); + pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pRegFrame->edx); if (pRec) { if (pRec->patch.uState == PATCH_ENABLED) { RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset; /* make it relative */ - rc = PATMAddBranchToLookupCache(pVM, (RTRCPTR)pRegFrame->edi, (RTRCPTR)pRegFrame->edx, pRelAddr); + rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pRegFrame->edi, (RTRCPTR)pRegFrame->edx, pRelAddr); if (rc == VINF_SUCCESS) { Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC)); @@ -449,12 +450,14 @@ VMMDECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame) * @param pVM Pointer to the VM. * @param pCtxCore The relevant core context. 
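 * @remarks When VBOX_WITH_RAW_RING1 is active, patch code may also run
 *          with an SS RPL of 2; the relaxed access check below accounts
 *          for that.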
*/ -VMMRCDECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame) +VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame) { PPATMPATCHREC pRec; int rc; - AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED); + AssertReturn(!pRegFrame->eflags.Bits.u1VM + && ( (pRegFrame->ss.Sel & X86_SEL_RPL) == 1 + || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED); /* Int 3 in PATM generated code? (most common case) */ if (PATMIsPatchGCAddr(pVM, pRegFrame->eip)) @@ -489,6 +492,10 @@ VMMRCDECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame) { case OP_CPUID: case OP_IRET: +#ifdef VBOX_WITH_RAW_RING1 + case OP_SMSW: + case OP_MOV: /* mov xx, CS */ +#endif break; case OP_STR: @@ -497,7 +504,9 @@ VMMRCDECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame) case OP_SIDT: case OP_LSL: case OP_LAR: +#ifndef VBOX_WITH_RAW_RING1 case OP_SMSW: +#endif case OP_VERW: case OP_VERR: default: diff --git a/src/VBox/VMM/VMMRC/PDMRCDevice.cpp b/src/VBox/VMM/VMMRC/PDMRCDevice.cpp index 5d2a3387..342e2f90 100644 --- a/src/VBox/VMM/VMMRC/PDMRCDevice.cpp +++ b/src/VBox/VMM/VMMRC/PDMRCDevice.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2011 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -54,13 +54,6 @@ RT_C_DECLS_END /******************************************************************************* -* Prototypes * -*******************************************************************************/ -static int pdmRCDevHlp_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead); -static int pdmRCDevHlp_PhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite); - - -/******************************************************************************* * Internal Functions * *******************************************************************************/ static bool pdmRCIsaSetIrq(PVM pVM, int iIrq, int iLevel, uint32_t uTagSrc); @@ -74,22 +67,23 @@ static bool pdmRCIsaSetIrq(PVM pVM, int iIrq, int iLevel, uint32_t uTagSrc); static DECLCALLBACK(int) pdmRCDevHlp_PCIPhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead) { PDMDEV_ASSERT_DEVINS(pDevIns); - LogFlow(("pdmRCDevHlp_PCIPhysRead: caller=%p/%d: GCPhys=%RGp pvBuf=%p cbRead=%#x\n", - pDevIns, pDevIns->iInstance, GCPhys, pvBuf, cbRead)); - PCIDevice *pPciDev = pDevIns->Internal.s.pPciDeviceRC; - AssertPtrReturn(pPciDev, VERR_INVALID_POINTER); +#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT + /* + * Just check the busmaster setting here and forward the request to the generic read helper. + */ + PPCIDEVICE pPciDev = pDevIns->Internal.s.pPciDeviceRC; + AssertReleaseMsg(pPciDev, ("No PCI device registered!\n")); if (!PCIDevIsBusmaster(pPciDev)) { -#ifdef DEBUG - LogFlow(("%s: %RU16:%RU16: No bus master (anymore), skipping read %p (%z)\n", __FUNCTION__, - PCIDevGetVendorId(pPciDev), PCIDevGetDeviceId(pPciDev), pvBuf, cbRead)); -#endif - return VINF_PDM_PCI_PHYS_READ_BM_DISABLED; + Log(("pdmRCDevHlp_PCIPhysRead: caller=%p/%d: returns %Rrc - Not bus master! 
GCPhys=%RGp cbRead=%#zx\n", + pDevIns, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbRead)); + return VERR_PDM_NOT_PCI_BUS_MASTER; } +#endif - return pdmRCDevHlp_PhysRead(pDevIns, GCPhys, pvBuf, cbRead); + return pDevIns->pHlpRC->pfnPhysRead(pDevIns, GCPhys, pvBuf, cbRead); } @@ -97,22 +91,21 @@ static DECLCALLBACK(int) pdmRCDevHlp_PCIPhysRead(PPDMDEVINS pDevIns, RTGCPHYS GC static DECLCALLBACK(int) pdmRCDevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite) { PDMDEV_ASSERT_DEVINS(pDevIns); - LogFlow(("pdmRCDevHlp_PCIPhysWrite: caller=%p/%d: GCPhys=%RGp pvBuf=%p cbWrite=%#x\n", - pDevIns, pDevIns->iInstance, GCPhys, pvBuf, cbWrite)); - PCIDevice *pPciDev = pDevIns->Internal.s.pPciDeviceRC; - AssertPtrReturn(pPciDev, VERR_INVALID_POINTER); + /* + * Just check the busmaster setting here and forward the request to the generic read helper. + */ + PPCIDEVICE pPciDev = pDevIns->Internal.s.pPciDeviceRC; + AssertReleaseMsg(pPciDev, ("No PCI device registered!\n")); if (!PCIDevIsBusmaster(pPciDev)) { -#ifdef DEBUG - LogFlow(("%s: %RU16:%RU16: No bus master (anymore), skipping write %p (%z)\n", __FUNCTION__, - PCIDevGetVendorId(pPciDev), PCIDevGetDeviceId(pPciDev), pvBuf, cbWrite)); -#endif - return VINF_PDM_PCI_PHYS_WRITE_BM_DISABLED; + Log(("pdmRCDevHlp_PCIPhysWrite: caller=%p/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n", + pDevIns, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite)); + return VERR_PDM_NOT_PCI_BUS_MASTER; } - return pdmRCDevHlp_PhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); + return pDevIns->pHlpRC->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); } @@ -417,7 +410,7 @@ static DECLCALLBACK(void) pdmRCPicHlp_SetInterruptFF(PPDMDEVINS pDevIns) PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */ LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: VMMCPU_FF_INTERRUPT_PIC %d -> 1\n", - pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC))); + pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))); VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); } @@ -442,7 +435,7 @@ static DECLCALLBACK(void) pdmRCPicHlp_ClearInterruptFF(PPDMDEVINS pDevIns) PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */ LogFlow(("pdmRCPicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n", - pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC))); + pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))); VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); } @@ -496,7 +489,7 @@ static DECLCALLBACK(void) pdmRCApicHlp_SetInterruptFF(PPDMDEVINS pDevIns, PDMAPI AssertReturnVoid(idCpu < pVM->cCpus); LogFlow(("pdmRCApicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 1\n", - pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC))); + pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))); switch (enmType) { case PDMAPICIRQ_HARDWARE: @@ -528,7 +521,7 @@ static DECLCALLBACK(void) pdmRCApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMA AssertReturnVoid(idCpu < pVM->cCpus); LogFlow(("pdmRCApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n", - pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC))); + pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))); /* Note: NMI/SMI can't be cleared. 
*/ switch (enmType) diff --git a/src/VBox/VMM/VMMRC/PGMRC.cpp b/src/VBox/VMM/VMMRC/PGMRC.cpp index b0c7185c..4137ee04 100644 --- a/src/VBox/VMM/VMMRC/PGMRC.cpp +++ b/src/VBox/VMM/VMMRC/PGMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/PGMRCBth.h b/src/VBox/VMM/VMMRC/PGMRCBth.h index fd006035..8abd7db0 100644 --- a/src/VBox/VMM/VMMRC/PGMRCBth.h +++ b/src/VBox/VMM/VMMRC/PGMRCBth.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2010 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/PGMRCGst.h b/src/VBox/VMM/VMMRC/PGMRCGst.h index bf06afd2..82200f24 100644 --- a/src/VBox/VMM/VMMRC/PGMRCGst.h +++ b/src/VBox/VMM/VMMRC/PGMRCGst.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2010 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/PGMRCShw.h b/src/VBox/VMM/VMMRC/PGMRCShw.h index 265f1687..f7c1b54f 100644 --- a/src/VBox/VMM/VMMRC/PGMRCShw.h +++ b/src/VBox/VMM/VMMRC/PGMRCShw.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2010 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/SELMRC.cpp b/src/VBox/VMM/VMMRC/SELMRC.cpp index 2fd3e7af..65367f39 100644 --- a/src/VBox/VMM/VMMRC/SELMRC.cpp +++ b/src/VBox/VMM/VMMRC/SELMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -34,6 +34,8 @@ #include <iprt/assert.h> #include <iprt/asm.h> +#include "SELMInline.h" + /******************************************************************************* * Global Variables * @@ -44,6 +46,7 @@ static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "F #endif +#ifdef SELM_TRACK_GUEST_GDT_CHANGES /** * Synchronizes one GDT entry (guest -> shadow). * @@ -122,7 +125,7 @@ static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegF /* * Convert the guest selector to a shadow selector and update the shadow GDT. */ - selmGuestToShadowDesc(&Desc); + selmGuestToShadowDesc(pVM, &Desc); PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry]; //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF )); //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF )); @@ -145,7 +148,8 @@ static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegF Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel)); paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE; VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */ - rcStrict = VINF_EM_RESCHEDULE_REM; + /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. 
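       (Rescheduling to REM at this point would leave EIP pointing into
       patch memory, hence the switch to the GDT-fault emulation status
       below.)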
*/ + rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; } else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE) { @@ -284,6 +288,9 @@ VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM) { + /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */ + if (rc2 == VINF_EM_RESCHEDULE_REM) + rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled); return rc; } @@ -305,8 +312,10 @@ VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled); return rc; } +#endif /* SELM_TRACK_GUEST_GDT_CHANGES */ +#ifdef SELM_TRACK_GUEST_LDT_CHANGES /** * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT. * @@ -329,8 +338,10 @@ VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT); return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT; } +#endif +#ifdef SELM_TRACK_GUEST_TSS_CHANGES /** * Read wrapper used by selmRCGuestTSSWriteHandler. * @returns VBox status code (appropriate for trap handling and GC return). @@ -381,7 +392,8 @@ VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX */ uint32_t cb; int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb); - if (RT_SUCCESS(rc) && cb) + if ( RT_SUCCESS(rc) + && cb) { rc = VINF_SUCCESS; @@ -402,6 +414,21 @@ VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX pVM->selm.s.Tss.ss1 = pGuestTss->ss0 | 1; STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged); } +#ifdef VBOX_WITH_RAW_RING1 + else if ( EMIsRawRing1Enabled(pVM) + && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1) + && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange) + && ( pGuestTss->esp1 != pVM->selm.s.Tss.esp2 + || pGuestTss->ss1 != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */ + ) + { + Log(("selmRCGuestTSSWriteHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n", + (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1)); + pVM->selm.s.Tss.esp2 = pGuestTss->esp1; + pVM->selm.s.Tss.ss2 = (pGuestTss->ss1 & ~1) | 2; + STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged); + } +#endif /* Handle misaligned TSS in a safe manner (just in case). */ else if ( offRange >= RT_UOFFSETOF(VBOXTSS, esp0) && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0)) @@ -491,8 +518,10 @@ VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTX } return rc; } +#endif /* SELM_TRACK_GUEST_TSS_CHANGES */ +#ifdef SELM_TRACK_SHADOW_GDT_CHANGES /** * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT. * @@ -511,8 +540,10 @@ VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCT NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); return VERR_SELM_SHADOW_GDT_WRITE; } +#endif +#ifdef SELM_TRACK_SHADOW_LDT_CHANGES /** * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT. 
* @@ -532,8 +563,10 @@ VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCT NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); return VERR_SELM_SHADOW_LDT_WRITE; } +#endif +#ifdef SELM_TRACK_SHADOW_TSS_CHANGES /** * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS. * @@ -552,4 +585,5 @@ VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCT NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); return VERR_SELM_SHADOW_TSS_WRITE; } +#endif diff --git a/src/VBox/VMM/VMMRC/TRPMRC.cpp b/src/VBox/VMM/VMMRC/TRPMRC.cpp index d19aaeca..0654c7e9 100644 --- a/src/VBox/VMM/VMMRC/TRPMRC.cpp +++ b/src/VBox/VMM/VMMRC/TRPMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp b/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp index a2a47b9a..3587db6b 100644 --- a/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp +++ b/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -66,7 +66,7 @@ # define TRPM_ENTER_DBG_HOOK(a_iVector) \ uint32_t const fDbgEFlags1 = CPUMRawGetEFlags(pVCpu); \ if (!(fDbgEFlags1 & X86_EFL_IF)) Log(("%s: IF=0 ##\n", __FUNCTION__)); \ - else do {} while(0) + else do {} while (0) # define TRPM_EXIT_DBG_HOOK(a_iVector) \ do { \ uint32_t const fDbgEFlags2 = CPUMRawGetEFlags(pVCpu); \ @@ -180,7 +180,7 @@ static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) { TMTimerPollVoid(pVM, pVCpu); Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VM_FF_TM_VIRTUAL_SYNC=%d\n", pRegFrame->eip, - VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))); + VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))); } } else @@ -188,7 +188,7 @@ static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) #endif /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */ - if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) + if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) { Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu))); if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu)) @@ -207,8 +207,8 @@ static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) * Or pending (A)PIC interrupt? Windows XP will crash if we delay APIC interrupts. 
*/ if ( rc == VINF_SUCCESS - && ( VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA) - || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC + && ( VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA) + || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS @@ -217,34 +217,34 @@ static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) ) { /* The out of memory condition naturally outranks the others. */ - if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))) + if (RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))) rc = VINF_EM_NO_MEMORY; /* Pending Ring-3 action. */ - else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT)) + else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT)) { VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3); rc = VINF_EM_RAW_TO_R3; } /* Pending timer action. */ - else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)) + else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)) rc = VINF_EM_RAW_TIMER_PENDING; /* The Virtual Sync clock has stopped. */ - else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC)) + else if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC)) rc = VINF_EM_RAW_TO_R3; /* DMA work pending? */ - else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA)) + else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA)) rc = VINF_EM_RAW_TO_R3; /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */ - else if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST) - || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST)) + else if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST) + || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST)) rc = VINF_EM_PENDING_REQUEST; /* Pending GDT/LDT/TSS sync. */ - else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS)) + else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS)) rc = VINF_SELM_SYNC_GDT; /* Pending interrupt: dispatch it. */ - else if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) - && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) + else if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) + && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) && PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame) ) { @@ -269,12 +269,12 @@ static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) /* * Try sync CR3? 
*/ - else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) + else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) { #if 1 PGMRZDynMapReleaseAutoSet(pVCpu); PGMRZDynMapStartAutoSet(pVCpu); - rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); + rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); #else rc = VINF_PGM_SYNC_CR3; #endif @@ -320,9 +320,15 @@ DECLASM(int) TRPMGCTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) * Now leave the rest to the DBGF. */ PGMRZDynMapStartAutoSet(pVCpu); - int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6); + int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6, false /*fAltStepping*/); if (rc == VINF_EM_RAW_GUEST_TRAP) - CPUMSetGuestDR6(pVCpu, uDr6); + { + CPUMSetGuestDR6(pVCpu, (CPUMGetGuestDR6(pVCpu) & ~X86_DR6_B_MASK) | uDr6); + if (CPUMGetGuestDR7(pVCpu) & X86_DR7_GD) + CPUMSetGuestDR7(pVCpu, CPUMGetGuestDR7(pVCpu) & ~X86_DR7_GD); + } + else if (rc == VINF_EM_DBG_STEPPED) + pRegFrame->eflags.Bits.u1TF = 0; rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame); Log6(("TRPMGC01: %Rrc (%04x:%08x %RTreg %EFlag=%#x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, uDr6, CPUMRawGetEFlags(pVCpu))); @@ -367,8 +373,10 @@ DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) /* * Now leave the rest to the DBGF. */ - int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6); + int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6, false /*fAltStepping*/); AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_TRPM_IPE_1); + if (rc == VINF_EM_DBG_STEPPED) + pRegFrame->eflags.Bits.u1TF = 0; Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, uDr6)); TRPM_EXIT_DBG_HOOK_HYPER(1); @@ -391,7 +399,9 @@ DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) DECLASM(int) TRPMGCTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) { LogFlow(("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip)); +#if 0 /* Enable this iff you have a COM port and really want this debug info. */ RTLogComPrintf("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip); +#endif NOREF(pTrpmCpu); return VERR_TRPM_DONT_PANIC; } @@ -415,7 +425,9 @@ DECLASM(int) TRPMGCTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) DECLASM(int) TRPMGCHyperTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) { LogFlow(("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip)); +#if 0 /* Enable this iff you have a COM port and really want this debug info. */ RTLogComPrintf("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs.Sel, pRegFrame->eip); +#endif NOREF(pTrpmCpu); return VERR_TRPM_DONT_PANIC; } @@ -444,8 +456,9 @@ DECLASM(int) TRPMGCTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) /* * PATM is using INT3s, let them have a go first. 
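     (Patch code normally runs with SS RPL 1; with raw ring-1 enabled it
     may also run at RPL 2, which the widened check below now admits.)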
*/ - if ( (pRegFrame->ss.Sel & X86_SEL_RPL) == 1 - && !pRegFrame->eflags.Bits.u1VM) + if ( ( (pRegFrame->ss.Sel & X86_SEL_RPL) == 1 + || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2) ) + && !pRegFrame->eflags.Bits.u1VM) { rc = PATMRCHandleInt3PatchTrap(pVM, pRegFrame); if ( rc == VINF_SUCCESS @@ -522,7 +535,7 @@ DECLASM(int) TRPMGCTrap06Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) TRPM_ENTER_DBG_HOOK(6); PGMRZDynMapStartAutoSet(pVCpu); - if (CPUMGetGuestCPL(pVCpu) == 0) + if (CPUMGetGuestCPL(pVCpu) <= (EMIsRawRing1Enabled(pVM) ? 1U : 0U)) { /* * Decode the instruction. @@ -948,13 +961,14 @@ static int trpmGCTrap0dHandlerRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFram case OP_CLI: { uint32_t efl = CPUMRawGetEFlags(pVCpu); - if (X86_EFL_GET_IOPL(efl) >= (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL)) + uint32_t cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame); + if (X86_EFL_GET_IOPL(efl) >= cpl) { LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> REM\n")); TRPM_EXIT_DBG_HOOK(0xd); return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RESCHEDULE_REM, pRegFrame); } - LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0)\n")); + LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0) iopl=%x, cpl=%x\n", X86_EFL_GET_IOPL(efl), cpl)); break; } } @@ -1061,9 +1075,53 @@ static int trpmGCTrap0dHandler(PVM pVM, PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFram if ( pVCpu->trpm.s.uActiveErrorCode == 0 && (Cpu.pCurInstr->fOpType & DISOPTYPE_PORTIO)) { - VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pRegFrame, &Cpu); + VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pVCpu, pRegFrame, &Cpu); if (IOM_SUCCESS(rcStrict)) + { pRegFrame->rip += cbOp; + + /* + * Check for I/O breakpoints. A bit clumsy, but should be short lived (moved to IEM). + */ + uint32_t const uDr7 = CPUMGetGuestDR7(pVCpu); + if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) + && X86_DR7_ANY_RW_IO(uDr7) + && (CPUMGetGuestCR4(pVCpu) & X86_CR4_DE)) + || DBGFBpIsHwIoArmed(pVM))) + { + uint64_t uPort = pRegFrame->dx; + unsigned cbValue; + if ( Cpu.pCurInstr->uOpcode == OP_IN + || Cpu.pCurInstr->uOpcode == OP_INSB + || Cpu.pCurInstr->uOpcode == OP_INSWD) + { + cbValue = DISGetParamSize(&Cpu, &Cpu.Param1); + if (Cpu.Param2.fUse & DISUSE_IMMEDIATE) + uPort = Cpu.Param2.uValue; + } + else + { + cbValue = DISGetParamSize(&Cpu, &Cpu.Param2); + if (Cpu.Param1.fUse & DISUSE_IMMEDIATE) + uPort = Cpu.Param1.uValue; + } + + VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, CPUMCTX_FROM_CORE(pRegFrame), uPort, cbValue); + if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP) + { + /* Raise #DB. */ + TRPMResetTrap(pVCpu); + TRPMAssertTrap(pVCpu, X86_XCPT_DE, TRPM_TRAP); + if (rcStrict) + LogRel(("trpmGCTrap0dHandler: Overriding %Rrc with #DB on I/O port access.\n", VBOXSTRICTRC_VAL(rcStrict))); + rcStrict = VINF_EM_RAW_GUEST_TRAP; + } + /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. 
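                       (Among informational VINF_EM statuses the lower
                       value has the higher scheduling priority, so the
                       smaller of the two codes wins the merge below.)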
*/ + else if ( rcStrict2 != VINF_SUCCESS + && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict)) + rcStrict = rcStrict2; + } + } rc = VBOXSTRICTRC_TODO(rcStrict); TRPM_EXIT_DBG_HOOK(0xd); return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame); @@ -1095,7 +1153,7 @@ static int trpmGCTrap0dHandler(PVM pVM, PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFram Log3(("TRPM #GP V86: cs:eip=%04x:%08x IOPL=%d efl=%08x\n", pRegFrame->cs.Sel, pRegFrame->eip, eflags.Bits.u2IOPL, eflags.u)); if (eflags.Bits.u2IOPL != 3) { - Assert(eflags.Bits.u2IOPL == 0); + Assert(EMIsRawRing1Enabled(pVM) || eflags.Bits.u2IOPL == 0); rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xD, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xd); Assert(rc == VINF_EM_RAW_GUEST_TRAP); @@ -1393,3 +1451,19 @@ DECLCALLBACK(int) trpmRCTrapInGeneric(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t return VERR_TRPM_IPE_3; } + +/** + * Generic hyper trap handler that sets the EIP to @a uUser. + * + * @returns VBox status code. (Anything but VINF_SUCCESS will cause guru.) + * @param pVM Pointer to the cross context VM structure. + * @param pRegFrame Pointer to the register frame (within VM) + * @param uUser The user arg, which should be the new EIP address. + */ +extern "C" DECLCALLBACK(int) TRPMRCTrapHyperHandlerSetEIP(PVM pVM, PCPUMCTXCORE pRegFrame, uintptr_t uUser) +{ + AssertReturn(MMHyperIsInsideArea(pVM, uUser), VERR_TRPM_IPE_3); + pRegFrame->eip = uUser; + return VINF_SUCCESS; +} + diff --git a/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm b/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm index 57988cf1..78389b91 100644 --- a/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm +++ b/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm @@ -296,22 +296,32 @@ GenericTrapErrCode: mov ebx, IMP(g_trpmGuestCtxCore) ; Assume GC as the most common. test byte [%$STK_CS], 3h ; check RPL of the cs selector - ;; @todo check this for conforming segments. - jnz .save_state + jnz .save_guest_state test dword [%$STK_EFLAGS], X86_EFL_VM; If in V86, then guest. - jnz .save_state + jnz .save_guest_state mov ebx, IMP(g_trpmHyperCtxCore) ; It's raw-mode context, actually. ; ; Save the state. ; - ; ASSUMPTION: If trap in hypervisor, we assume that we can read two dword - ; under the bottom of the stack. This is atm safe. - ; -.save_state: +.save_hyper_state: + mov [ebx + CPUMCTXCORE.ecx], ecx + lea eax, [%$STK_ESP] + mov [ebx + CPUMCTXCORE.esp], eax + mov cx, ss + mov [ebx + CPUMCTXCORE.ss.Sel], cx + jmp .save_state_common + +.save_guest_state: + mov [ebx + CPUMCTXCORE.ecx], ecx + mov eax, [%$STK_ESP] + mov [ebx + CPUMCTXCORE.esp], eax + mov cx, [%$STK_SS] + mov [ebx + CPUMCTXCORE.ss.Sel], cx + +.save_state_common: mov eax, [%$STK_SAVED_EAX] mov [ebx + CPUMCTXCORE.eax], eax - mov [ebx + CPUMCTXCORE.ecx], ecx mov [ebx + CPUMCTXCORE.edx], edx mov eax, [%$STK_SAVED_EBX] mov [ebx + CPUMCTXCORE.ebx], eax @@ -319,11 +329,6 @@ GenericTrapErrCode: mov [ebx + CPUMCTXCORE.edi], edi mov [ebx + CPUMCTXCORE.ebp], ebp - mov eax, [%$STK_ESP] - mov [ebx + CPUMCTXCORE.esp], eax - mov cx, [%$STK_SS] - mov [ebx + CPUMCTXCORE.ss.Sel], cx - mov cx, [%$STK_CS] mov [ebx + CPUMCTXCORE.cs.Sel], cx mov eax, [%$STK_EIP] @@ -791,23 +796,33 @@ ti_GenericInterrupt: mov cr0, eax mov ebx, IMP(g_trpmGuestCtxCore) ; Assume GC as the most common. - test byte [%$STK_CS], 3h ; check RPL of the cs selector - ;; @todo check this for conforming segments. - jnz .save_state - test dword [%$STK_EFLAGS], X86_EFL_VM ; If in V86, then guest. 
- jnz .save_state + test byte [%$STK_CS], 3h ; check RPL of the cs selector + jnz .save_guest_state + test dword [%$STK_EFLAGS], X86_EFL_VM ; If in V86, then guest. + jnz .save_guest_state mov ebx, IMP(g_trpmHyperCtxCore) ; It's raw-mode context, actually. ; ; Save the state. ; - ; ASSUMPTION: If trap in hypervisor, we assume that we can read two dword - ; under the bottom of the stack. This is atm safe. - ; -.save_state: +.save_hyper_state: + mov [ebx + CPUMCTXCORE.ecx], ecx + lea eax, [%$STK_ESP] + mov [ebx + CPUMCTXCORE.esp], eax + mov cx, ss + mov [ebx + CPUMCTXCORE.ss.Sel], cx + jmp .save_state_common + +.save_guest_state: + mov [ebx + CPUMCTXCORE.ecx], ecx + mov eax, [%$STK_ESP] + mov [ebx + CPUMCTXCORE.esp], eax + mov cx, [%$STK_SS] + mov [ebx + CPUMCTXCORE.ss.Sel], cx + +.save_state_common: mov eax, [%$STK_SAVED_EAX] mov [ebx + CPUMCTXCORE.eax], eax - mov [ebx + CPUMCTXCORE.ecx], ecx mov [ebx + CPUMCTXCORE.edx], edx mov eax, [%$STK_SAVED_EBX] mov [ebx + CPUMCTXCORE.ebx], eax @@ -815,11 +830,6 @@ ti_GenericInterrupt: mov [ebx + CPUMCTXCORE.edi], edi mov [ebx + CPUMCTXCORE.ebp], ebp - mov eax, [%$STK_ESP] - mov [ebx + CPUMCTXCORE.esp], eax - mov cx, [%$STK_SS] - mov [ebx + CPUMCTXCORE.ss.Sel], cx - mov cx, [%$STK_CS] mov [ebx + CPUMCTXCORE.cs.Sel], cx mov eax, [%$STK_EIP] diff --git a/src/VBox/VMM/VMMRC/VMMRC.cpp b/src/VBox/VMM/VMMRC/VMMRC.cpp index fb2a2c62..06e20031 100644 --- a/src/VBox/VMM/VMMRC/VMMRC.cpp +++ b/src/VBox/VMM/VMMRC/VMMRC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -47,6 +47,8 @@ extern "C" DECLIMPORT(RTLOGGERRC) g_RelLogger; static int vmmGCTest(PVM pVM, unsigned uOperation, unsigned uArg); static DECLCALLBACK(int) vmmGCTestTmpPFHandler(PVM pVM, PCPUMCTXCORE pRegFrame); static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame); +DECLASM(bool) vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value); +DECLASM(bool) vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value); @@ -69,18 +71,23 @@ VMMRCDECL(int) VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...) case VMMGC_DO_VMMGC_INIT: { /* - * Validate the svn revision (uArg). + * Validate the svn revision (uArg) and build type (ellipsis). */ if (uArg != VMMGetSvnRev()) return VERR_VMM_RC_VERSION_MISMATCH; + va_list va; + va_start(va, uArg); + + uint32_t uBuildType = va_arg(va, uint32_t); + if (uBuildType != vmmGetBuildType()) + return VERR_VMM_RC_VERSION_MISMATCH; + /* * Initialize the runtime. - * (The program timestamp is found in the elipsis.) */ - va_list va; - va_start(va, uArg); uint64_t u64TS = va_arg(va, uint64_t); + va_end(va); int rc = RTRCInit(u64TS); @@ -119,7 +126,7 @@ VMMRCDECL(int) VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...) /* * Testcase executes a privileged instruction to force a world switch. (in both SVM & VMX) */ - case VMMGC_DO_TESTCASE_HWACCM_NOP: + case VMMGC_DO_TESTCASE_HM_NOP: ASMRdMsr_Low(MSR_IA32_SYSENTER_CS); return 0; @@ -338,6 +345,82 @@ static int vmmGCTest(PVM pVM, unsigned uOperation, unsigned uArg) } + +/** + * Reads a range of MSRs. + * + * This is called directly via VMMR3CallRC. + * + * @returns VBox status code. + * @param pVM The VM handle. + * @param uMsr The MSR to start at. + * @param cMsrs The number of MSRs to read. + * @param paResults Where to store the results. 
This must be large + * enough to hold at least @a cMsrs result values. + */ +extern "C" VMMRCDECL(int) +VMMRCTestReadMsrs(PVM pVM, uint32_t uMsr, uint32_t cMsrs, PVMMTESTMSRENTRY paResults) +{ + AssertReturn(cMsrs <= 16384, VERR_INVALID_PARAMETER); + AssertPtrReturn(paResults, VERR_INVALID_POINTER); + ASMIntEnable(); /* Run with interrupts enabled, so we can query more MSRs in one block. */ + + for (uint32_t i = 0; i < cMsrs; i++, uMsr++) + { + if (vmmRCSafeMsrRead(uMsr, &paResults[i].uValue)) + paResults[i].uMsr = uMsr; + else + paResults[i].uMsr = UINT64_MAX; + } + + ASMIntDisable(); + return VINF_SUCCESS; +} + + +/** + * Tries to write the given value to an MSR, returns the effect, and restores + * the original value. + * + * This is called directly via VMMR3CallRC. + * + * @returns VBox status code. + * @param pVM The VM handle. + * @param uMsr The MSR to write to. + * @param u32ValueLow The low part of the value to write. + * @param u32ValueHi The high part of the value to write. + * @param puValueBefore Where to return the value read before writing. + * @param puValueAfter Where to return the value read back after writing. + */ +extern "C" VMMRCDECL(int) +VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi, + uint64_t *puValueBefore, uint64_t *puValueAfter) +{ + AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER); + AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER); + ASMIntDisable(); + + int rc = VINF_SUCCESS; + uint64_t uValueBefore = UINT64_MAX; + uint64_t uValueAfter = UINT64_MAX; + if (vmmRCSafeMsrRead(uMsr, &uValueBefore)) + { + if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi))) + rc = VERR_WRITE_PROTECT; + if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc)) + rc = VERR_READ_ERROR; + vmmRCSafeMsrWrite(uMsr, uValueBefore); + } + else + rc = VERR_ACCESS_DENIED; + + *puValueBefore = uValueBefore; + *puValueAfter = uValueAfter; + return rc; +} + + + /** * Temporary \#PF trap handler for the \#PF test case. * diff --git a/src/VBox/VMM/VMMRC/VMMRC.def b/src/VBox/VMM/VMMRC/VMMRC.def index c8179eda..76dcb19e 100644 --- a/src/VBox/VMM/VMMRC/VMMRC.def +++ b/src/VBox/VMM/VMMRC/VMMRC.def @@ -3,7 +3,7 @@ ; VMM Raw-mode Context DLL - Definition file. ; -; Copyright (C) 2006-2009 Oracle Corporation +; Copyright (C) 2006-2012 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; @@ -75,11 +75,6 @@ EXPORTS vmmGCTestTrap8_FaultEIP VMSetError VMSetErrorV - VMXGCStartVM64 - SVMGCVMRun64 - HWACCMSaveGuestFPU64 - HWACCMSaveGuestDebug64 - HWACCMTestSwitcher64 ; runtime nocrt_memchr diff --git a/src/VBox/VMM/VMMRC/VMMRC.mac b/src/VBox/VMM/VMMRC/VMMRC.mac index 2fe1e4f5..1584bcf7 100644 --- a/src/VBox/VMM/VMMRC/VMMRC.mac +++ b/src/VBox/VMM/VMMRC/VMMRC.mac @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2012 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/VMMRC0.asm b/src/VBox/VMM/VMMRC/VMMRC0.asm index ce8bb078..bd3cf694 100644 --- a/src/VBox/VMM/VMMRC/VMMRC0.asm +++ b/src/VBox/VMM/VMMRC/VMMRC0.asm @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2010 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org.
This file is free software; diff --git a/src/VBox/VMM/VMMRC/VMMRC99.asm b/src/VBox/VMM/VMMRC/VMMRC99.asm index c34479da..6dd4454a 100644 --- a/src/VBox/VMM/VMMRC/VMMRC99.asm +++ b/src/VBox/VMM/VMMRC/VMMRC99.asm @@ -3,7 +3,7 @@ ; VMMGC99 - The last object module in the link. ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2010 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/VMMRCA.asm b/src/VBox/VMM/VMMRC/VMMRCA.asm index 106e08e8..ad523a48 100644 --- a/src/VBox/VMM/VMMRC/VMMRCA.asm +++ b/src/VBox/VMM/VMMRC/VMMRCA.asm @@ -23,6 +23,7 @@ %include "VBox/sup.mac" %include "VBox/vmm/vm.mac" %include "VMMInternal.mac" +%include "VMMRC.mac" ;******************************************************************************* @@ -56,6 +57,7 @@ extern IMPNAME(g_Logger) extern IMPNAME(g_RelLogger) extern NAME(RTLogLogger) extern NAME(vmmRCProbeFireHelper) +extern NAME(TRPMRCTrapHyperHandlerSetEIP) BEGINCODE @@ -222,6 +224,89 @@ ENDPROC vmmGCTestTrap0e ;; +; Safely reads an MSR. +; @returns boolean +; @param uMsr The MSR to read. +; @param pu64Value Where to return the value on success. +; +GLOBALNAME vmmRCSafeMsrRead + push ebp + mov ebp, esp + pushf + cli + push esi + push edi + push ebx + push ebp + + mov ecx, [ebp + 8] ; The MSR to read. + mov eax, 0deadbeefh + mov edx, 0deadbeefh + +TRPM_GP_HANDLER NAME(TRPMRCTrapHyperHandlerSetEIP), .trapped + rdmsr + + mov ecx, [ebp + 0ch] ; Where to store the result. + mov [ecx], eax + mov [ecx + 4], edx + + mov eax, 1 +.return: + pop ebp + pop ebx + pop edi + pop esi + popf + leave + ret + +.trapped: + mov eax, 0 + jmp .return +ENDPROC vmmRCSafeMsrRead + + +;; +; Safely writes an MSR. +; @returns boolean +; @param uMsr The MSR to write to. +; @param u64Value The value to write. +; +GLOBALNAME vmmRCSafeMsrWrite + push ebp + mov ebp, esp + pushf + cli + push esi + push edi + push ebx + push ebp + + mov ecx, [ebp + 8] ; The MSR to write to. + mov eax, [ebp + 12] ; The value to write. + mov edx, [ebp + 16] + +TRPM_GP_HANDLER NAME(TRPMRCTrapHyperHandlerSetEIP), .trapped + wrmsr + + mov eax, 1 +.return: + pop ebp + pop ebx + pop edi + pop esi + popf + leave + ret + +.trapped: + mov eax, 0 + jmp .return +ENDPROC vmmRCSafeMsrWrite + + + +;; ; The raw-mode context equivalent of SUPTracerFireProbe. ; ; See also SUPLibTracerA.asm. diff --git a/src/VBox/VMM/VMMRC/VMMRCBuiltin.def b/src/VBox/VMM/VMMRC/VMMRCBuiltin.def index 47e87662..374a81ac 100644 --- a/src/VBox/VMM/VMMRC/VMMRCBuiltin.def +++ b/src/VBox/VMM/VMMRC/VMMRCBuiltin.def @@ -3,7 +3,7 @@ ; VMM Raw-mode Context Builtin DLL - Definition file for generating import library. ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2012 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMRC/VMMRCDeps.cpp b/src/VBox/VMM/VMMRC/VMMRCDeps.cpp index 82819207..374af5dc 100644 --- a/src/VBox/VMM/VMMRC/VMMRCDeps.cpp +++ b/src/VBox/VMM/VMMRC/VMMRCDeps.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software;
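
For readers following the MSR-probing additions above (VMMRCTestReadMsrs, VMMRCTestTestWriteMsr and the vmmRCSafeMsrRead/vmmRCSafeMsrWrite helpers), the stand-alone C sketch below mirrors the probe-and-restore logic of VMMRCTestTestWriteMsr: read the original value, attempt the write, read back, and always restore. It is an illustration only, not VirtualBox code; the msr_safe_read/msr_safe_write stubs are hypothetical stand-ins for the assembly helpers (which survive #GP via the TRPM_GP_HANDLER fixup), and the numeric error codes stand in for VERR_WRITE_PROTECT, VERR_READ_ERROR and VERR_ACCESS_DENIED.

/* Stand-alone sketch of the VMMRCTestTestWriteMsr probe logic. The two
 * msr_safe_* stubs below are hypothetical stand-ins for vmmRCSafeMsrRead
 * and vmmRCSafeMsrWrite, which in the real code trap #GP via a fixup. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t g_fakeMsr = 0x1234;        /* pretend MSR storage for the demo */

static bool msr_safe_read(uint32_t uMsr, uint64_t *puValue)
{
    (void)uMsr;
    *puValue = g_fakeMsr;                  /* real helper: rdmsr under a #GP fixup */
    return true;
}

static bool msr_safe_write(uint32_t uMsr, uint64_t uValue)
{
    (void)uMsr;
    g_fakeMsr = uValue;                    /* real helper: wrmsr under a #GP fixup */
    return true;
}

/* Mirrors VMMRCTestTestWriteMsr: probe the write, report before/after values,
 * and restore the original value regardless of what happened in between. */
static int probe_msr_write(uint32_t uMsr, uint64_t uNewValue,
                           uint64_t *puBefore, uint64_t *puAfter)
{
    uint64_t uBefore = UINT64_MAX;
    uint64_t uAfter  = UINT64_MAX;
    int      rc      = 0;

    if (msr_safe_read(uMsr, &uBefore))
    {
        if (!msr_safe_write(uMsr, uNewValue))
            rc = -1;                       /* VERR_WRITE_PROTECT in the diff */
        if (!msr_safe_read(uMsr, &uAfter) && rc == 0)
            rc = -2;                       /* VERR_READ_ERROR in the diff */
        msr_safe_write(uMsr, uBefore);     /* always restore the original value */
    }
    else
        rc = -3;                           /* VERR_ACCESS_DENIED in the diff */

    *puBefore = uBefore;
    *puAfter  = uAfter;
    return rc;
}

int main(void)
{
    uint64_t uBefore, uAfter;
    int rc = probe_msr_write(0x10 /* illustrative MSR number */, 0, &uBefore, &uAfter);
    printf("rc=%d before=%#llx after=%#llx\n", rc,
           (unsigned long long)uBefore, (unsigned long long)uAfter);
    return 0;
}

The restore-in-all-cases shape is the important design point: since the probe runs with interrupts disabled on the real CPU, nothing else can observe the temporary value, and the host state is unchanged by the experiment.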

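The status-merging branch added to trpmGCTrap0dHandler earlier (the rcStrict/rcStrict2 comparison in the I/O breakpoint path) relies on the VBox convention that informational statuses in [VINF_EM_FIRST..VINF_EM_LAST] are ordered so that numerically lower values carry higher scheduling priority. The following minimal sketch shows just that merge rule; the constants are illustrative stand-ins, not the real VBox status values.

/* Sketch of the strict-status merge used in the I/O breakpoint path.
 * Constants are illustrative stand-ins; in VBox, informational VINF_EM_*
 * codes are ordered so that lower numeric values take priority. */
#include <stdio.h>

#define MY_VINF_SUCCESS        0    /* stand-in for VINF_SUCCESS */
#define MY_VINF_EM_DBG_EVENT   1105 /* illustrative value only */
#define MY_VINF_EM_RESCHEDULE  1115 /* illustrative value only */

static int merge_strict_rc(int rcStrict, int rcStrict2)
{
    /* Keep rcStrict unless rcStrict2 is informational and more urgent. */
    if (   rcStrict2 != MY_VINF_SUCCESS
        && (rcStrict == MY_VINF_SUCCESS || rcStrict2 < rcStrict))
        rcStrict = rcStrict2;
    return rcStrict;
}

int main(void)
{
    /* The more urgent (lower) informational status wins. */
    printf("%d\n", merge_strict_rc(MY_VINF_EM_RESCHEDULE, MY_VINF_EM_DBG_EVENT)); /* 1105 */
    /* A plain success never overrides an informational status. */
    printf("%d\n", merge_strict_rc(MY_VINF_EM_RESCHEDULE, MY_VINF_SUCCESS));      /* 1115 */
    return 0;
}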